[PATCH] slab: fix code formatting
author Pekka Enberg <penberg@cs.helsinki.fi>
Sun, 8 Jan 2006 09:00:37 +0000 (01:00 -0800)
committer Linus Torvalds <torvalds@g5.osdl.org>
Mon, 9 Jan 2006 04:12:39 +0000 (20:12 -0800)
The slab allocator code is messy and inconsistent in coding style.  For this
patch, I ran Lindent on mm/slab.c and fixed up the goofs by hand.
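
Lindent lives in scripts/Lindent and is a thin wrapper around GNU indent with
the kernel's preferred options; the run behind this patch would have looked
roughly like the sketch below (assuming it is invoked from the top of a kernel
source tree), with the remaining goofs then fixed up by hand:

    scripts/Lindent mm/slab.c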

Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
mm/slab.c

diff --git a/mm/slab.c b/mm/slab.c
index 2551b1e..f71d8be 100644
--- a/mm/slab.c
+++ b/mm/slab.c
 #define        FORCED_DEBUG    0
 #endif
 
-
 /* Shouldn't this be in a header file somewhere? */
 #define        BYTES_PER_WORD          sizeof(void *)
 
@@ -217,12 +216,12 @@ static unsigned long offslab_limit;
  * Slabs are chained into three list: fully used, partial, fully free slabs.
  */
 struct slab {
-       struct list_head        list;
-       unsigned long           colouroff;
-       void                    *s_mem;         /* including colour offset */
-       unsigned int            inuse;          /* num of objs active in slab */
-       kmem_bufctl_t           free;
-       unsigned short          nodeid;
+       struct list_head list;
+       unsigned long colouroff;
+       void *s_mem;            /* including colour offset */
+       unsigned int inuse;     /* num of objs active in slab */
+       kmem_bufctl_t free;
+       unsigned short nodeid;
 };
 
 /*
@@ -242,9 +241,9 @@ struct slab {
  * We assume struct slab_rcu can overlay struct slab when destroying.
  */
 struct slab_rcu {
-       struct rcu_head         head;
-       kmem_cache_t            *cachep;
-       void                    *addr;
+       struct rcu_head head;
+       kmem_cache_t *cachep;
+       void *addr;
 };
 
 /*
@@ -279,23 +278,23 @@ struct array_cache {
 #define BOOT_CPUCACHE_ENTRIES  1
 struct arraycache_init {
        struct array_cache cache;
-       void * entries[BOOT_CPUCACHE_ENTRIES];
+       void *entries[BOOT_CPUCACHE_ENTRIES];
 };
 
 /*
  * The slab lists for all objects.
  */
 struct kmem_list3 {
-       struct list_head        slabs_partial;  /* partial list first, better asm code */
-       struct list_head        slabs_full;
-       struct list_head        slabs_free;
-       unsigned long   free_objects;
-       unsigned long   next_reap;
-       int             free_touched;
-       unsigned int    free_limit;
-       spinlock_t      list_lock;
-       struct array_cache      *shared;        /* shared per node */
-       struct array_cache      **alien;        /* on other nodes */
+       struct list_head slabs_partial; /* partial list first, better asm code */
+       struct list_head slabs_full;
+       struct list_head slabs_free;
+       unsigned long free_objects;
+       unsigned long next_reap;
+       int free_touched;
+       unsigned int free_limit;
+       spinlock_t list_lock;
+       struct array_cache *shared;     /* shared per node */
+       struct array_cache **alien;     /* on other nodes */
 };
 
 /*
@@ -367,63 +366,63 @@ static inline void kmem_list3_init(struct kmem_list3 *parent)
  *
  * manages a cache.
  */
-       
+
 struct kmem_cache {
 /* 1) per-cpu data, touched during every alloc/free */
-       struct array_cache      *array[NR_CPUS];
-       unsigned int            batchcount;
-       unsigned int            limit;
-       unsigned int            shared;
-       unsigned int            objsize;
+       struct array_cache *array[NR_CPUS];
+       unsigned int batchcount;
+       unsigned int limit;
+       unsigned int shared;
+       unsigned int objsize;
 /* 2) touched by every alloc & free from the backend */
-       struct kmem_list3       *nodelists[MAX_NUMNODES];
-       unsigned int            flags;  /* constant flags */
-       unsigned int            num;    /* # of objs per slab */
-       spinlock_t              spinlock;
+       struct kmem_list3 *nodelists[MAX_NUMNODES];
+       unsigned int flags;     /* constant flags */
+       unsigned int num;       /* # of objs per slab */
+       spinlock_t spinlock;
 
 /* 3) cache_grow/shrink */
        /* order of pgs per slab (2^n) */
-       unsigned int            gfporder;
+       unsigned int gfporder;
 
        /* force GFP flags, e.g. GFP_DMA */
-       gfp_t                   gfpflags;
+       gfp_t gfpflags;
 
-       size_t                  colour;         /* cache colouring range */
-       unsigned int            colour_off;     /* colour offset */
-       unsigned int            colour_next;    /* cache colouring */
-       kmem_cache_t            *slabp_cache;
-       unsigned int            slab_size;
-       unsigned int            dflags;         /* dynamic flags */
+       size_t colour;          /* cache colouring range */
+       unsigned int colour_off;        /* colour offset */
+       unsigned int colour_next;       /* cache colouring */
+       kmem_cache_t *slabp_cache;
+       unsigned int slab_size;
+       unsigned int dflags;    /* dynamic flags */
 
        /* constructor func */
-       void (*ctor)(void *, kmem_cache_t *, unsigned long);
+       void (*ctor) (void *, kmem_cache_t *, unsigned long);
 
        /* de-constructor func */
-       void (*dtor)(void *, kmem_cache_t *, unsigned long);
+       void (*dtor) (void *, kmem_cache_t *, unsigned long);
 
 /* 4) cache creation/removal */
-       const char              *name;
-       struct list_head        next;
+       const char *name;
+       struct list_head next;
 
 /* 5) statistics */
 #if STATS
-       unsigned long           num_active;
-       unsigned long           num_allocations;
-       unsigned long           high_mark;
-       unsigned long           grown;
-       unsigned long           reaped;
-       unsigned long           errors;
-       unsigned long           max_freeable;
-       unsigned long           node_allocs;
-       unsigned long           node_frees;
-       atomic_t                allochit;
-       atomic_t                allocmiss;
-       atomic_t                freehit;
-       atomic_t                freemiss;
+       unsigned long num_active;
+       unsigned long num_allocations;
+       unsigned long high_mark;
+       unsigned long grown;
+       unsigned long reaped;
+       unsigned long errors;
+       unsigned long max_freeable;
+       unsigned long node_allocs;
+       unsigned long node_frees;
+       atomic_t allochit;
+       atomic_t allocmiss;
+       atomic_t freehit;
+       atomic_t freemiss;
 #endif
 #if DEBUG
-       int                     dbghead;
-       int                     reallen;
+       int dbghead;
+       int reallen;
 #endif
 };
 
@@ -523,14 +522,15 @@ static unsigned long *dbg_redzone2(kmem_cache_t *cachep, void *objp)
 {
        BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
        if (cachep->flags & SLAB_STORE_USER)
-               return (unsigned long*) (objp+cachep->objsize-2*BYTES_PER_WORD);
-       return (unsigned long*) (objp+cachep->objsize-BYTES_PER_WORD);
+               return (unsigned long *)(objp + cachep->objsize -
+                                        2 * BYTES_PER_WORD);
+       return (unsigned long *)(objp + cachep->objsize - BYTES_PER_WORD);
 }
 
 static void **dbg_userword(kmem_cache_t *cachep, void *objp)
 {
        BUG_ON(!(cachep->flags & SLAB_STORE_USER));
-       return (void**)(objp+cachep->objsize-BYTES_PER_WORD);
+       return (void **)(objp + cachep->objsize - BYTES_PER_WORD);
 }
 
 #else
@@ -607,31 +607,31 @@ struct cache_names {
 static struct cache_names __initdata cache_names[] = {
 #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
 #include <linux/kmalloc_sizes.h>
-       { NULL, }
+       {NULL,}
 #undef CACHE
 };
 
 static struct arraycache_init initarray_cache __initdata =
-       { { 0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
+    { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
 static struct arraycache_init initarray_generic =
-       { { 0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
+    { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
 
 /* internal cache of cache description objs */
 static kmem_cache_t cache_cache = {
-       .batchcount     = 1,
-       .limit          = BOOT_CPUCACHE_ENTRIES,
-       .shared         = 1,
-       .objsize        = sizeof(kmem_cache_t),
-       .flags          = SLAB_NO_REAP,
-       .spinlock       = SPIN_LOCK_UNLOCKED,
-       .name           = "kmem_cache",
+       .batchcount = 1,
+       .limit = BOOT_CPUCACHE_ENTRIES,
+       .shared = 1,
+       .objsize = sizeof(kmem_cache_t),
+       .flags = SLAB_NO_REAP,
+       .spinlock = SPIN_LOCK_UNLOCKED,
+       .name = "kmem_cache",
 #if DEBUG
-       .reallen        = sizeof(kmem_cache_t),
+       .reallen = sizeof(kmem_cache_t),
 #endif
 };
 
 /* Guard access to the cache-chain. */
-static struct semaphore        cache_chain_sem;
+static struct semaphore cache_chain_sem;
 static struct list_head cache_chain;
 
 /*
@@ -655,9 +655,9 @@ static enum {
 
 static DEFINE_PER_CPU(struct work_struct, reap_work);
 
-static void free_block(kmem_cache_t* cachep, void** objpp, int len, int node);
-static void enable_cpucache (kmem_cache_t *cachep);
-static void cache_reap (void *unused);
+static void free_block(kmem_cache_t *cachep, void **objpp, int len, int node);
+static void enable_cpucache(kmem_cache_t *cachep);
+static void cache_reap(void *unused);
 static int __node_shrink(kmem_cache_t *cachep, int node);
 
 static inline struct array_cache *ac_data(kmem_cache_t *cachep)
@@ -671,9 +671,9 @@ static inline kmem_cache_t *__find_general_cachep(size_t size, gfp_t gfpflags)
 
 #if DEBUG
        /* This happens if someone tries to call
-       * kmem_cache_create(), or __kmalloc(), before
-       * the generic caches are initialized.
-       */
+        * kmem_cache_create(), or __kmalloc(), before
+        * the generic caches are initialized.
+        */
        BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL);
 #endif
        while (size > csizep->cs_size)
@@ -697,10 +697,10 @@ EXPORT_SYMBOL(kmem_find_general_cachep);
 
 /* Cal the num objs, wastage, and bytes left over for a given slab size. */
 static void cache_estimate(unsigned long gfporder, size_t size, size_t align,
-                int flags, size_t *left_over, unsigned int *num)
+                          int flags, size_t *left_over, unsigned int *num)
 {
        int i;
-       size_t wastage = PAGE_SIZE<<gfporder;
+       size_t wastage = PAGE_SIZE << gfporder;
        size_t extra = 0;
        size_t base = 0;
 
@@ -709,7 +709,7 @@ static void cache_estimate(unsigned long gfporder, size_t size, size_t align,
                extra = sizeof(kmem_bufctl_t);
        }
        i = 0;
-       while (i*size + ALIGN(base+i*extra, align) <= wastage)
+       while (i * size + ALIGN(base + i * extra, align) <= wastage)
                i++;
        if (i > 0)
                i--;
@@ -718,8 +718,8 @@ static void cache_estimate(unsigned long gfporder, size_t size, size_t align,
                i = SLAB_LIMIT;
 
        *num = i;
-       wastage -= i*size;
-       wastage -= ALIGN(base+i*extra, align);
+       wastage -= i * size;
+       wastage -= ALIGN(base + i * extra, align);
        *left_over = wastage;
 }
 
@@ -728,7 +728,7 @@ static void cache_estimate(unsigned long gfporder, size_t size, size_t align,
 static void __slab_error(const char *function, kmem_cache_t *cachep, char *msg)
 {
        printk(KERN_ERR "slab error in %s(): cache `%s': %s\n",
-               function, cachep->name, msg);
+              function, cachep->name, msg);
        dump_stack();
 }
 
@@ -755,9 +755,9 @@ static void __devinit start_cpu_timer(int cpu)
 }
 
 static struct array_cache *alloc_arraycache(int node, int entries,
-                                               int batchcount)
+                                           int batchcount)
 {
-       int memsize = sizeof(void*)*entries+sizeof(struct array_cache);
+       int memsize = sizeof(void *) * entries + sizeof(struct array_cache);
        struct array_cache *nc = NULL;
 
        nc = kmalloc_node(memsize, GFP_KERNEL, node);
@@ -775,7 +775,7 @@ static struct array_cache *alloc_arraycache(int node, int entries,
 static inline struct array_cache **alloc_alien_cache(int node, int limit)
 {
        struct array_cache **ac_ptr;
-       int memsize = sizeof(void*)*MAX_NUMNODES;
+       int memsize = sizeof(void *) * MAX_NUMNODES;
        int i;
 
        if (limit > 1)
@@ -789,7 +789,7 @@ static inline struct array_cache **alloc_alien_cache(int node, int limit)
                        }
                        ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d);
                        if (!ac_ptr[i]) {
-                               for (i--; i <=0; i--)
+                               for (i--; i <= 0; i--)
                                        kfree(ac_ptr[i]);
                                kfree(ac_ptr);
                                return NULL;
@@ -807,12 +807,13 @@ static inline void free_alien_cache(struct array_cache **ac_ptr)
                return;
 
        for_each_node(i)
-               kfree(ac_ptr[i]);
+           kfree(ac_ptr[i]);
 
        kfree(ac_ptr);
 }
 
-static inline void __drain_alien_cache(kmem_cache_t *cachep, struct array_cache *ac, int node)
+static inline void __drain_alien_cache(kmem_cache_t *cachep,
+                                      struct array_cache *ac, int node)
 {
        struct kmem_list3 *rl3 = cachep->nodelists[node];
 
@@ -826,7 +827,7 @@ static inline void __drain_alien_cache(kmem_cache_t *cachep, struct array_cache
 
 static void drain_alien_cache(kmem_cache_t *cachep, struct kmem_list3 *l3)
 {
-       int i=0;
+       int i = 0;
        struct array_cache *ac;
        unsigned long flags;
 
@@ -846,10 +847,10 @@ static void drain_alien_cache(kmem_cache_t *cachep, struct kmem_list3 *l3)
 #endif
 
 static int __devinit cpuup_callback(struct notifier_block *nfb,
-                                 unsigned long action, void *hcpu)
+                                   unsigned long action, void *hcpu)
 {
        long cpu = (long)hcpu;
-       kmem_cache_t* cachep;
+       kmem_cache_t *cachep;
        struct kmem_list3 *l3 = NULL;
        int node = cpu_to_node(cpu);
        int memsize = sizeof(struct kmem_list3);
@@ -871,27 +872,27 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
                         */
                        if (!cachep->nodelists[node]) {
                                if (!(l3 = kmalloc_node(memsize,
-                                               GFP_KERNEL, node)))
+                                                       GFP_KERNEL, node)))
                                        goto bad;
                                kmem_list3_init(l3);
                                l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
-                                 ((unsigned long)cachep)%REAPTIMEOUT_LIST3;
+                                   ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
 
                                cachep->nodelists[node] = l3;
                        }
 
                        spin_lock_irq(&cachep->nodelists[node]->list_lock);
                        cachep->nodelists[node]->free_limit =
-                               (1 + nr_cpus_node(node)) *
-                               cachep->batchcount + cachep->num;
+                           (1 + nr_cpus_node(node)) *
+                           cachep->batchcount + cachep->num;
                        spin_unlock_irq(&cachep->nodelists[node]->list_lock);
                }
 
                /* Now we can go ahead with allocating the shared array's
-                 & array cache's */
+                  & array cache's */
                list_for_each_entry(cachep, &cache_chain, next) {
                        nc = alloc_arraycache(node, cachep->limit,
-                                       cachep->batchcount);
+                                             cachep->batchcount);
                        if (!nc)
                                goto bad;
                        cachep->array[cpu] = nc;
@@ -900,12 +901,13 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
                        BUG_ON(!l3);
                        if (!l3->shared) {
                                if (!(nc = alloc_arraycache(node,
-                                       cachep->shared*cachep->batchcount,
-                                       0xbaadf00d)))
-                                       goto  bad;
+                                                           cachep->shared *
+                                                           cachep->batchcount,
+                                                           0xbaadf00d)))
+                                       goto bad;
 
                                /* we are serialised from CPU_DEAD or
-                                 CPU_UP_CANCELLED by the cpucontrol lock */
+                                  CPU_UP_CANCELLED by the cpucontrol lock */
                                l3->shared = nc;
                        }
                }
@@ -942,13 +944,13 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
                                free_block(cachep, nc->entry, nc->avail, node);
 
                        if (!cpus_empty(mask)) {
-                                spin_unlock(&l3->list_lock);
-                                goto unlock_cache;
-                        }
+                               spin_unlock(&l3->list_lock);
+                               goto unlock_cache;
+                       }
 
                        if (l3->shared) {
                                free_block(cachep, l3->shared->entry,
-                                               l3->shared->avail, node);
+                                          l3->shared->avail, node);
                                kfree(l3->shared);
                                l3->shared = NULL;
                        }
@@ -966,7 +968,7 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
                        } else {
                                spin_unlock(&l3->list_lock);
                        }
-unlock_cache:
+                     unlock_cache:
                        spin_unlock_irq(&cachep->spinlock);
                        kfree(nc);
                }
@@ -975,7 +977,7 @@ unlock_cache:
 #endif
        }
        return NOTIFY_OK;
-bad:
+      bad:
        up(&cache_chain_sem);
        return NOTIFY_BAD;
 }
@@ -985,8 +987,7 @@ static struct notifier_block cpucache_notifier = { &cpuup_callback, NULL, 0 };
 /*
  * swap the static kmem_list3 with kmalloced memory
  */
-static void init_list(kmem_cache_t *cachep, struct kmem_list3 *list,
-               int nodeid)
+static void init_list(kmem_cache_t *cachep, struct kmem_list3 *list, int nodeid)
 {
        struct kmem_list3 *ptr;
 
@@ -1055,14 +1056,14 @@ void __init kmem_cache_init(void)
        cache_cache.objsize = ALIGN(cache_cache.objsize, cache_line_size());
 
        cache_estimate(0, cache_cache.objsize, cache_line_size(), 0,
-                               &left_over, &cache_cache.num);
+                      &left_over, &cache_cache.num);
        if (!cache_cache.num)
                BUG();
 
-       cache_cache.colour = left_over/cache_cache.colour_off;
+       cache_cache.colour = left_over / cache_cache.colour_off;
        cache_cache.colour_next = 0;
-       cache_cache.slab_size = ALIGN(cache_cache.num*sizeof(kmem_bufctl_t) +
-                               sizeof(struct slab), cache_line_size());
+       cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) +
+                                     sizeof(struct slab), cache_line_size());
 
        /* 2+3) create the kmalloc caches */
        sizes = malloc_sizes;
@@ -1074,14 +1075,18 @@ void __init kmem_cache_init(void)
         */
 
        sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
-                               sizes[INDEX_AC].cs_size, ARCH_KMALLOC_MINALIGN,
-                               (ARCH_KMALLOC_FLAGS | SLAB_PANIC), NULL, NULL);
+                                                     sizes[INDEX_AC].cs_size,
+                                                     ARCH_KMALLOC_MINALIGN,
+                                                     (ARCH_KMALLOC_FLAGS |
+                                                      SLAB_PANIC), NULL, NULL);
 
        if (INDEX_AC != INDEX_L3)
                sizes[INDEX_L3].cs_cachep =
-                       kmem_cache_create(names[INDEX_L3].name,
-                               sizes[INDEX_L3].cs_size, ARCH_KMALLOC_MINALIGN,
-                               (ARCH_KMALLOC_FLAGS | SLAB_PANIC), NULL, NULL);
+                   kmem_cache_create(names[INDEX_L3].name,
+                                     sizes[INDEX_L3].cs_size,
+                                     ARCH_KMALLOC_MINALIGN,
+                                     (ARCH_KMALLOC_FLAGS | SLAB_PANIC), NULL,
+                                     NULL);
 
        while (sizes->cs_size != ULONG_MAX) {
                /*
@@ -1091,35 +1096,41 @@ void __init kmem_cache_init(void)
                 * Note for systems short on memory removing the alignment will
                 * allow tighter packing of the smaller caches.
                 */
-               if(!sizes->cs_cachep)
+               if (!sizes->cs_cachep)
                        sizes->cs_cachep = kmem_cache_create(names->name,
-                               sizes->cs_size, ARCH_KMALLOC_MINALIGN,
-                               (ARCH_KMALLOC_FLAGS | SLAB_PANIC), NULL, NULL);
+                                                            sizes->cs_size,
+                                                            ARCH_KMALLOC_MINALIGN,
+                                                            (ARCH_KMALLOC_FLAGS
+                                                             | SLAB_PANIC),
+                                                            NULL, NULL);
 
                /* Inc off-slab bufctl limit until the ceiling is hit. */
                if (!(OFF_SLAB(sizes->cs_cachep))) {
-                       offslab_limit = sizes->cs_size-sizeof(struct slab);
+                       offslab_limit = sizes->cs_size - sizeof(struct slab);
                        offslab_limit /= sizeof(kmem_bufctl_t);
                }
 
                sizes->cs_dmacachep = kmem_cache_create(names->name_dma,
-                       sizes->cs_size, ARCH_KMALLOC_MINALIGN,
-                       (ARCH_KMALLOC_FLAGS | SLAB_CACHE_DMA | SLAB_PANIC),
-                       NULL, NULL);
+                                                       sizes->cs_size,
+                                                       ARCH_KMALLOC_MINALIGN,
+                                                       (ARCH_KMALLOC_FLAGS |
+                                                        SLAB_CACHE_DMA |
+                                                        SLAB_PANIC), NULL,
+                                                       NULL);
 
                sizes++;
                names++;
        }
        /* 4) Replace the bootstrap head arrays */
        {
-               void * ptr;
+               void *ptr;
 
                ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
 
                local_irq_disable();
                BUG_ON(ac_data(&cache_cache) != &initarray_cache.cache);
                memcpy(ptr, ac_data(&cache_cache),
-                               sizeof(struct arraycache_init));
+                      sizeof(struct arraycache_init));
                cache_cache.array[smp_processor_id()] = ptr;
                local_irq_enable();
 
@@ -1127,11 +1138,11 @@ void __init kmem_cache_init(void)
 
                local_irq_disable();
                BUG_ON(ac_data(malloc_sizes[INDEX_AC].cs_cachep)
-                               != &initarray_generic.cache);
+                      != &initarray_generic.cache);
                memcpy(ptr, ac_data(malloc_sizes[INDEX_AC].cs_cachep),
-                               sizeof(struct arraycache_init));
+                      sizeof(struct arraycache_init));
                malloc_sizes[INDEX_AC].cs_cachep->array[smp_processor_id()] =
-                                               ptr;
+                   ptr;
                local_irq_enable();
        }
        /* 5) Replace the bootstrap kmem_list3's */
@@ -1139,16 +1150,16 @@ void __init kmem_cache_init(void)
                int node;
                /* Replace the static kmem_list3 structures for the boot cpu */
                init_list(&cache_cache, &initkmem_list3[CACHE_CACHE],
-                               numa_node_id());
+                         numa_node_id());
 
                for_each_online_node(node) {
                        init_list(malloc_sizes[INDEX_AC].cs_cachep,
-                                       &initkmem_list3[SIZE_AC+node], node);
+                                 &initkmem_list3[SIZE_AC + node], node);
 
                        if (INDEX_AC != INDEX_L3) {
                                init_list(malloc_sizes[INDEX_L3].cs_cachep,
-                                               &initkmem_list3[SIZE_L3+node],
-                                               node);
+                                         &initkmem_list3[SIZE_L3 + node],
+                                         node);
                        }
                }
        }
@@ -1158,7 +1169,7 @@ void __init kmem_cache_init(void)
                kmem_cache_t *cachep;
                down(&cache_chain_sem);
                list_for_each_entry(cachep, &cache_chain, next)
-                       enable_cpucache(cachep);
+                   enable_cpucache(cachep);
                up(&cache_chain_sem);
        }
 
@@ -1184,7 +1195,7 @@ static int __init cpucache_init(void)
         * pages to gfp.
         */
        for_each_online_cpu(cpu)
-               start_cpu_timer(cpu);
+           start_cpu_timer(cpu);
 
        return 0;
 }
@@ -1226,7 +1237,7 @@ static void *kmem_getpages(kmem_cache_t *cachep, gfp_t flags, int nodeid)
  */
 static void kmem_freepages(kmem_cache_t *cachep, void *addr)
 {
-       unsigned long i = (1<<cachep->gfporder);
+       unsigned long i = (1 << cachep->gfporder);
        struct page *page = virt_to_page(addr);
        const unsigned long nr_freed = i;
 
@@ -1239,13 +1250,13 @@ static void kmem_freepages(kmem_cache_t *cachep, void *addr)
        if (current->reclaim_state)
                current->reclaim_state->reclaimed_slab += nr_freed;
        free_pages((unsigned long)addr, cachep->gfporder);
-       if (cachep->flags & SLAB_RECLAIM_ACCOUNT) 
-               atomic_sub(1<<cachep->gfporder, &slab_reclaim_pages);
+       if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
+               atomic_sub(1 << cachep->gfporder, &slab_reclaim_pages);
 }
 
 static void kmem_rcu_free(struct rcu_head *head)
 {
-       struct slab_rcu *slab_rcu = (struct slab_rcu *) head;
+       struct slab_rcu *slab_rcu = (struct slab_rcu *)head;
        kmem_cache_t *cachep = slab_rcu->cachep;
 
        kmem_freepages(cachep, slab_rcu->addr);
@@ -1257,19 +1268,19 @@ static void kmem_rcu_free(struct rcu_head *head)
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
 static void store_stackinfo(kmem_cache_t *cachep, unsigned long *addr,
-                               unsigned long caller)
+                           unsigned long caller)
 {
        int size = obj_reallen(cachep);
 
-       addr = (unsigned long *)&((char*)addr)[obj_dbghead(cachep)];
+       addr = (unsigned long *)&((char *)addr)[obj_dbghead(cachep)];
 
-       if (size < 5*sizeof(unsigned long))
+       if (size < 5 * sizeof(unsigned long))
                return;
 
-       *addr++=0x12345678;
-       *addr++=caller;
-       *addr++=smp_processor_id();
-       size -= 3*sizeof(unsigned long);
+       *addr++ = 0x12345678;
+       *addr++ = caller;
+       *addr++ = smp_processor_id();
+       size -= 3 * sizeof(unsigned long);
        {
                unsigned long *sptr = &caller;
                unsigned long svalue;
@@ -1277,7 +1288,7 @@ static void store_stackinfo(kmem_cache_t *cachep, unsigned long *addr,
                while (!kstack_end(sptr)) {
                        svalue = *sptr++;
                        if (kernel_text_address(svalue)) {
-                               *addr++=svalue;
+                               *addr++ = svalue;
                                size -= sizeof(unsigned long);
                                if (size <= sizeof(unsigned long))
                                        break;
@@ -1285,25 +1296,25 @@ static void store_stackinfo(kmem_cache_t *cachep, unsigned long *addr,
                }
 
        }
-       *addr++=0x87654321;
+       *addr++ = 0x87654321;
 }
 #endif
 
 static void poison_obj(kmem_cache_t *cachep, void *addr, unsigned char val)
 {
        int size = obj_reallen(cachep);
-       addr = &((char*)addr)[obj_dbghead(cachep)];
+       addr = &((char *)addr)[obj_dbghead(cachep)];
 
        memset(addr, val, size);
-       *(unsigned char *)(addr+size-1) = POISON_END;
+       *(unsigned char *)(addr + size - 1) = POISON_END;
 }
 
 static void dump_line(char *data, int offset, int limit)
 {
        int i;
        printk(KERN_ERR "%03x:", offset);
-       for (i=0;i<limit;i++) {
-               printk(" %02x", (unsigned char)data[offset+i]);
+       for (i = 0; i < limit; i++) {
+               printk(" %02x", (unsigned char)data[offset + i]);
        }
        printk("\n");
 }
@@ -1318,24 +1329,24 @@ static void print_objinfo(kmem_cache_t *cachep, void *objp, int lines)
 
        if (cachep->flags & SLAB_RED_ZONE) {
                printk(KERN_ERR "Redzone: 0x%lx/0x%lx.\n",
-                       *dbg_redzone1(cachep, objp),
-                       *dbg_redzone2(cachep, objp));
+                      *dbg_redzone1(cachep, objp),
+                      *dbg_redzone2(cachep, objp));
        }
 
        if (cachep->flags & SLAB_STORE_USER) {
                printk(KERN_ERR "Last user: [<%p>]",
-                               *dbg_userword(cachep, objp));
+                      *dbg_userword(cachep, objp));
                print_symbol("(%s)",
-                               (unsigned long)*dbg_userword(cachep, objp));
+                            (unsigned long)*dbg_userword(cachep, objp));
                printk("\n");
        }
-       realobj = (char*)objp+obj_dbghead(cachep);
+       realobj = (char *)objp + obj_dbghead(cachep);
        size = obj_reallen(cachep);
-       for (i=0; i<size && lines;i+=16, lines--) {
+       for (i = 0; i < size && lines; i += 16, lines--) {
                int limit;
                limit = 16;
-               if (i+limit > size)
-                       limit = size-i;
+               if (i + limit > size)
+                       limit = size - i;
                dump_line(realobj, i, limit);
        }
 }
@@ -1346,27 +1357,28 @@ static void check_poison_obj(kmem_cache_t *cachep, void *objp)
        int size, i;
        int lines = 0;
 
-       realobj = (char*)objp+obj_dbghead(cachep);
+       realobj = (char *)objp + obj_dbghead(cachep);
        size = obj_reallen(cachep);
 
-       for (i=0;i<size;i++) {
+       for (i = 0; i < size; i++) {
                char exp = POISON_FREE;
-               if (i == size-1)
+               if (i == size - 1)
                        exp = POISON_END;
                if (realobj[i] != exp) {
                        int limit;
                        /* Mismatch ! */
                        /* Print header */
                        if (lines == 0) {
-                               printk(KERN_ERR "Slab corruption: start=%p, len=%d\n",
-                                               realobj, size);
+                               printk(KERN_ERR
+                                      "Slab corruption: start=%p, len=%d\n",
+                                      realobj, size);
                                print_objinfo(cachep, objp, 0);
                        }
                        /* Hexdump the affected line */
-                       i = (i/16)*16;
+                       i = (i / 16) * 16;
                        limit = 16;
-                       if (i+limit > size)
-                               limit = size-i;
+                       if (i + limit > size)
+                               limit = size - i;
                        dump_line(realobj, i, limit);
                        i += 16;
                        lines++;
@@ -1382,19 +1394,19 @@ static void check_poison_obj(kmem_cache_t *cachep, void *objp)
                struct slab *slabp = page_get_slab(virt_to_page(objp));
                int objnr;
 
-               objnr = (objp-slabp->s_mem)/cachep->objsize;
+               objnr = (objp - slabp->s_mem) / cachep->objsize;
                if (objnr) {
-                       objp = slabp->s_mem+(objnr-1)*cachep->objsize;
-                       realobj = (char*)objp+obj_dbghead(cachep);
+                       objp = slabp->s_mem + (objnr - 1) * cachep->objsize;
+                       realobj = (char *)objp + obj_dbghead(cachep);
                        printk(KERN_ERR "Prev obj: start=%p, len=%d\n",
-                                               realobj, size);
+                              realobj, size);
                        print_objinfo(cachep, objp, 2);
                }
-               if (objnr+1 < cachep->num) {
-                       objp = slabp->s_mem+(objnr+1)*cachep->objsize;
-                       realobj = (char*)objp+obj_dbghead(cachep);
+               if (objnr + 1 < cachep->num) {
+                       objp = slabp->s_mem + (objnr + 1) * cachep->objsize;
+                       realobj = (char *)objp + obj_dbghead(cachep);
                        printk(KERN_ERR "Next obj: start=%p, len=%d\n",
-                                               realobj, size);
+                              realobj, size);
                        print_objinfo(cachep, objp, 2);
                }
        }
@@ -1405,7 +1417,7 @@ static void check_poison_obj(kmem_cache_t *cachep, void *objp)
  * Before calling the slab must have been unlinked from the cache.
  * The cache-lock is not held/needed.
  */
-static void slab_destroy (kmem_cache_t *cachep, struct slab *slabp)
+static void slab_destroy(kmem_cache_t *cachep, struct slab *slabp)
 {
        void *addr = slabp->s_mem - slabp->colouroff;
 
@@ -1416,8 +1428,11 @@ static void slab_destroy (kmem_cache_t *cachep, struct slab *slabp)
 
                if (cachep->flags & SLAB_POISON) {
 #ifdef CONFIG_DEBUG_PAGEALLOC
-                       if ((cachep->objsize%PAGE_SIZE)==0 && OFF_SLAB(cachep))
-                               kernel_map_pages(virt_to_page(objp), cachep->objsize/PAGE_SIZE,1);
+                       if ((cachep->objsize % PAGE_SIZE) == 0
+                           && OFF_SLAB(cachep))
+                               kernel_map_pages(virt_to_page(objp),
+                                                cachep->objsize / PAGE_SIZE,
+                                                1);
                        else
                                check_poison_obj(cachep, objp);
 #else
@@ -1427,20 +1442,20 @@ static void slab_destroy (kmem_cache_t *cachep, struct slab *slabp)
                if (cachep->flags & SLAB_RED_ZONE) {
                        if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
                                slab_error(cachep, "start of a freed object "
-                                                       "was overwritten");
+                                          "was overwritten");
                        if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
                                slab_error(cachep, "end of a freed object "
-                                                       "was overwritten");
+                                          "was overwritten");
                }
                if (cachep->dtor && !(cachep->flags & SLAB_POISON))
-                       (cachep->dtor)(objp+obj_dbghead(cachep), cachep, 0);
+                       (cachep->dtor) (objp + obj_dbghead(cachep), cachep, 0);
        }
 #else
        if (cachep->dtor) {
                int i;
                for (i = 0; i < cachep->num; i++) {
-                       void* objp = slabp->s_mem+cachep->objsize*i;
-                       (cachep->dtor)(objp, cachep, 0);
+                       void *objp = slabp->s_mem + cachep->objsize * i;
+                       (cachep->dtor) (objp, cachep, 0);
                }
        }
 #endif
@@ -1448,7 +1463,7 @@ static void slab_destroy (kmem_cache_t *cachep, struct slab *slabp)
        if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) {
                struct slab_rcu *slab_rcu;
 
-               slab_rcu = (struct slab_rcu *) slabp;
+               slab_rcu = (struct slab_rcu *)slabp;
                slab_rcu->cachep = cachep;
                slab_rcu->addr = addr;
                call_rcu(&slab_rcu->head, kmem_rcu_free);
@@ -1466,10 +1481,10 @@ static inline void set_up_list3s(kmem_cache_t *cachep, int index)
        int node;
 
        for_each_online_node(node) {
-               cachep->nodelists[node] = &initkmem_list3[index+node];
+               cachep->nodelists[node] = &initkmem_list3[index + node];
                cachep->nodelists[node]->next_reap = jiffies +
-                       REAPTIMEOUT_LIST3 +
-                       ((unsigned long)cachep)%REAPTIMEOUT_LIST3;
+                   REAPTIMEOUT_LIST3 +
+                   ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
        }
 }
 
@@ -1486,7 +1501,7 @@ static inline size_t calculate_slab_order(kmem_cache_t *cachep, size_t size,
 {
        size_t left_over = 0;
 
-       for ( ; ; cachep->gfporder++) {
+       for (;; cachep->gfporder++) {
                unsigned int num;
                size_t remainder;
 
@@ -1566,14 +1581,13 @@ kmem_cache_create (const char *name, size_t size, size_t align,
         * Sanity checks... these are all serious usage bugs.
         */
        if ((!name) ||
-               in_interrupt() ||
-               (size < BYTES_PER_WORD) ||
-               (size > (1<<MAX_OBJ_ORDER)*PAGE_SIZE) ||
-               (dtor && !ctor)) {
-                       printk(KERN_ERR "%s: Early error in slab %s\n",
-                                       __FUNCTION__, name);
-                       BUG();
-               }
+           in_interrupt() ||
+           (size < BYTES_PER_WORD) ||
+           (size > (1 << MAX_OBJ_ORDER) * PAGE_SIZE) || (dtor && !ctor)) {
+               printk(KERN_ERR "%s: Early error in slab %s\n",
+                      __FUNCTION__, name);
+               BUG();
+       }
 
        down(&cache_chain_sem);
 
@@ -1593,11 +1607,11 @@ kmem_cache_create (const char *name, size_t size, size_t align,
                set_fs(old_fs);
                if (res) {
                        printk("SLAB: cache with size %d has lost its name\n",
-                                       pc->objsize);
+                              pc->objsize);
                        continue;
                }
 
-               if (!strcmp(pc->name,name)) {
+               if (!strcmp(pc->name, name)) {
                        printk("kmem_cache_create: duplicate cache %s\n", name);
                        dump_stack();
                        goto oops;
@@ -1609,10 +1623,9 @@ kmem_cache_create (const char *name, size_t size, size_t align,
        if ((flags & SLAB_DEBUG_INITIAL) && !ctor) {
                /* No constructor, but inital state check requested */
                printk(KERN_ERR "%s: No con, but init state check "
-                               "requested - %s\n", __FUNCTION__, name);
+                      "requested - %s\n", __FUNCTION__, name);
                flags &= ~SLAB_DEBUG_INITIAL;
        }
-
 #if FORCED_DEBUG
        /*
         * Enable redzoning and last user accounting, except for caches with
@@ -1620,8 +1633,9 @@ kmem_cache_create (const char *name, size_t size, size_t align,
         * above the next power of two: caches with object sizes just above a
         * power of two have a significant amount of internal fragmentation.
         */
-       if ((size < 4096 || fls(size-1) == fls(size-1+3*BYTES_PER_WORD)))
-               flags |= SLAB_RED_ZONE|SLAB_STORE_USER;
+       if ((size < 4096
+            || fls(size - 1) == fls(size - 1 + 3 * BYTES_PER_WORD)))
+               flags |= SLAB_RED_ZONE | SLAB_STORE_USER;
        if (!(flags & SLAB_DESTROY_BY_RCU))
                flags |= SLAB_POISON;
 #endif
@@ -1642,9 +1656,9 @@ kmem_cache_create (const char *name, size_t size, size_t align,
         * unaligned accesses for some archs when redzoning is used, and makes
         * sure any on-slab bufctl's are also correctly aligned.
         */
-       if (size & (BYTES_PER_WORD-1)) {
-               size += (BYTES_PER_WORD-1);
-               size &= ~(BYTES_PER_WORD-1);
+       if (size & (BYTES_PER_WORD - 1)) {
+               size += (BYTES_PER_WORD - 1);
+               size &= ~(BYTES_PER_WORD - 1);
        }
 
        /* calculate out the final buffer alignment: */
@@ -1655,7 +1669,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
                 * objects into one cacheline.
                 */
                ralign = cache_line_size();
-               while (size <= ralign/2)
+               while (size <= ralign / 2)
                        ralign /= 2;
        } else {
                ralign = BYTES_PER_WORD;
@@ -1664,13 +1678,13 @@ kmem_cache_create (const char *name, size_t size, size_t align,
        if (ralign < ARCH_SLAB_MINALIGN) {
                ralign = ARCH_SLAB_MINALIGN;
                if (ralign > BYTES_PER_WORD)
-                       flags &= ~(SLAB_RED_ZONE|SLAB_STORE_USER);
+                       flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
        }
        /* 3) caller mandated alignment: disables debug if necessary */
        if (ralign < align) {
                ralign = align;
                if (ralign > BYTES_PER_WORD)
-                       flags &= ~(SLAB_RED_ZONE|SLAB_STORE_USER);
+                       flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
        }
        /* 4) Store it. Note that the debug code below can reduce
         *    the alignment to BYTES_PER_WORD.
@@ -1692,7 +1706,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 
                /* add space for red zone words */
                cachep->dbghead += BYTES_PER_WORD;
-               size += 2*BYTES_PER_WORD;
+               size += 2 * BYTES_PER_WORD;
        }
        if (flags & SLAB_STORE_USER) {
                /* user store requires word alignment and
@@ -1703,7 +1717,8 @@ kmem_cache_create (const char *name, size_t size, size_t align,
                size += BYTES_PER_WORD;
        }
 #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
-       if (size >= malloc_sizes[INDEX_L3+1].cs_size && cachep->reallen > cache_line_size() && size < PAGE_SIZE) {
+       if (size >= malloc_sizes[INDEX_L3 + 1].cs_size
+           && cachep->reallen > cache_line_size() && size < PAGE_SIZE) {
                cachep->dbghead += PAGE_SIZE - size;
                size = PAGE_SIZE;
        }
@@ -1711,7 +1726,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 #endif
 
        /* Determine if the slab management is 'on' or 'off' slab. */
-       if (size >= (PAGE_SIZE>>3))
+       if (size >= (PAGE_SIZE >> 3))
                /*
                 * Size is large, assume best to place the slab management obj
                 * off-slab (should allow better packing of objs).
@@ -1728,7 +1743,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
                 */
                cachep->gfporder = 0;
                cache_estimate(cachep->gfporder, size, align, flags,
-                                       &left_over, &cachep->num);
+                              &left_over, &cachep->num);
        } else
                left_over = calculate_slab_order(cachep, size, align, flags);
 
@@ -1738,8 +1753,8 @@ kmem_cache_create (const char *name, size_t size, size_t align,
                cachep = NULL;
                goto oops;
        }
-       slab_size = ALIGN(cachep->num*sizeof(kmem_bufctl_t)
-                               + sizeof(struct slab), align);
+       slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t)
+                         + sizeof(struct slab), align);
 
        /*
         * If the slab has been placed off-slab, and we have enough space then
@@ -1752,14 +1767,15 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 
        if (flags & CFLGS_OFF_SLAB) {
                /* really off slab. No need for manual alignment */
-               slab_size = cachep->num*sizeof(kmem_bufctl_t)+sizeof(struct slab);
+               slab_size =
+                   cachep->num * sizeof(kmem_bufctl_t) + sizeof(struct slab);
        }
 
        cachep->colour_off = cache_line_size();
        /* Offset must be a multiple of the alignment. */
        if (cachep->colour_off < align)
                cachep->colour_off = align;
-       cachep->colour = left_over/cachep->colour_off;
+       cachep->colour = left_over / cachep->colour_off;
        cachep->slab_size = slab_size;
        cachep->flags = flags;
        cachep->gfpflags = 0;
@@ -1786,7 +1802,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
                         * the creation of further caches will BUG().
                         */
                        cachep->array[smp_processor_id()] =
-                               &initarray_generic.cache;
+                           &initarray_generic.cache;
 
                        /* If the cache that's used by
                         * kmalloc(sizeof(kmem_list3)) is the first cache,
@@ -1800,8 +1816,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
                                g_cpucache_up = PARTIAL_AC;
                } else {
                        cachep->array[smp_processor_id()] =
-                               kmalloc(sizeof(struct arraycache_init),
-                                               GFP_KERNEL);
+                           kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
 
                        if (g_cpucache_up == PARTIAL_AC) {
                                set_up_list3s(cachep, SIZE_L3);
@@ -1811,16 +1826,18 @@ kmem_cache_create (const char *name, size_t size, size_t align,
                                for_each_online_node(node) {
 
                                        cachep->nodelists[node] =
-                                               kmalloc_node(sizeof(struct kmem_list3),
-                                                               GFP_KERNEL, node);
+                                           kmalloc_node(sizeof
+                                                        (struct kmem_list3),
+                                                        GFP_KERNEL, node);
                                        BUG_ON(!cachep->nodelists[node]);
-                                       kmem_list3_init(cachep->nodelists[node]);
+                                       kmem_list3_init(cachep->
+                                                       nodelists[node]);
                                }
                        }
                }
                cachep->nodelists[numa_node_id()]->next_reap =
-                       jiffies + REAPTIMEOUT_LIST3 +
-                       ((unsigned long)cachep)%REAPTIMEOUT_LIST3;
+                   jiffies + REAPTIMEOUT_LIST3 +
+                   ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
 
                BUG_ON(!ac_data(cachep));
                ac_data(cachep)->avail = 0;
@@ -1829,15 +1846,15 @@ kmem_cache_create (const char *name, size_t size, size_t align,
                ac_data(cachep)->touched = 0;
                cachep->batchcount = 1;
                cachep->limit = BOOT_CPUCACHE_ENTRIES;
-       } 
+       }
 
        /* cache setup completed, link it into the list */
        list_add(&cachep->next, &cache_chain);
        unlock_cpu_hotplug();
-oops:
+      oops:
        if (!cachep && (flags & SLAB_PANIC))
                panic("kmem_cache_create(): failed to create slab `%s'\n",
-                       name);
+                     name);
        up(&cache_chain_sem);
        return cachep;
 }
@@ -1880,7 +1897,7 @@ static inline void check_spinlock_acquired_node(kmem_cache_t *cachep, int node)
 /*
  * Waits for all CPUs to execute func().
  */
-static void smp_call_function_all_cpus(void (*func) (void *arg), void *arg)
+static void smp_call_function_all_cpus(void (*func)(void *arg), void *arg)
 {
        check_irq_on();
        preempt_disable();
@@ -1895,12 +1912,12 @@ static void smp_call_function_all_cpus(void (*func) (void *arg), void *arg)
        preempt_enable();
 }
 
-static void drain_array_locked(kmem_cache_t* cachep,
-                               struct array_cache *ac, int force, int node);
+static void drain_array_locked(kmem_cache_t *cachep, struct array_cache *ac,
+                               int force, int node);
 
 static void do_drain(void *arg)
 {
-       kmem_cache_t *cachep = (kmem_cache_t*)arg;
+       kmem_cache_t *cachep = (kmem_cache_t *) arg;
        struct array_cache *ac;
        int node = numa_node_id();
 
@@ -1920,7 +1937,7 @@ static void drain_cpu_caches(kmem_cache_t *cachep)
        smp_call_function_all_cpus(do_drain, cachep);
        check_irq_on();
        spin_lock_irq(&cachep->spinlock);
-       for_each_online_node(node)  {
+       for_each_online_node(node) {
                l3 = cachep->nodelists[node];
                if (l3) {
                        spin_lock(&l3->list_lock);
@@ -1958,8 +1975,7 @@ static int __node_shrink(kmem_cache_t *cachep, int node)
                slab_destroy(cachep, slabp);
                spin_lock_irq(&l3->list_lock);
        }
-       ret = !list_empty(&l3->slabs_full) ||
-               !list_empty(&l3->slabs_partial);
+       ret = !list_empty(&l3->slabs_full) || !list_empty(&l3->slabs_partial);
        return ret;
 }
 
@@ -2015,7 +2031,7 @@ EXPORT_SYMBOL(kmem_cache_shrink);
  * The caller must guarantee that noone will allocate memory from the cache
  * during the kmem_cache_destroy().
  */
-int kmem_cache_destroy(kmem_cache_t * cachep)
+int kmem_cache_destroy(kmem_cache_t *cachep)
 {
        int i;
        struct kmem_list3 *l3;
@@ -2037,7 +2053,7 @@ int kmem_cache_destroy(kmem_cache_t * cachep)
        if (__cache_shrink(cachep)) {
                slab_error(cachep, "Can't free all objects");
                down(&cache_chain_sem);
-               list_add(&cachep->next,&cache_chain);
+               list_add(&cachep->next, &cache_chain);
                up(&cache_chain_sem);
                unlock_cpu_hotplug();
                return 1;
@@ -2047,7 +2063,7 @@ int kmem_cache_destroy(kmem_cache_t * cachep)
                synchronize_rcu();
 
        for_each_online_cpu(i)
-               kfree(cachep->array[i]);
+           kfree(cachep->array[i]);
 
        /* NUMA: free the list3 structures */
        for_each_online_node(i) {
@@ -2066,39 +2082,39 @@ int kmem_cache_destroy(kmem_cache_t * cachep)
 EXPORT_SYMBOL(kmem_cache_destroy);
 
 /* Get the memory for a slab management obj. */
-static struct slab* alloc_slabmgmt(kmem_cache_t *cachep, void *objp,
-                       int colour_off, gfp_t local_flags)
+static struct slab *alloc_slabmgmt(kmem_cache_t *cachep, void *objp,
+                                  int colour_off, gfp_t local_flags)
 {
        struct slab *slabp;
-       
+
        if (OFF_SLAB(cachep)) {
                /* Slab management obj is off-slab. */
                slabp = kmem_cache_alloc(cachep->slabp_cache, local_flags);
                if (!slabp)
                        return NULL;
        } else {
-               slabp = objp+colour_off;
+               slabp = objp + colour_off;
                colour_off += cachep->slab_size;
        }
        slabp->inuse = 0;
        slabp->colouroff = colour_off;
-       slabp->s_mem = objp+colour_off;
+       slabp->s_mem = objp + colour_off;
 
        return slabp;
 }
 
 static inline kmem_bufctl_t *slab_bufctl(struct slab *slabp)
 {
-       return (kmem_bufctl_t *)(slabp+1);
+       return (kmem_bufctl_t *) (slabp + 1);
 }
 
 static void cache_init_objs(kmem_cache_t *cachep,
-                       struct slab *slabp, unsigned long ctor_flags)
+                           struct slab *slabp, unsigned long ctor_flags)
 {
        int i;
 
        for (i = 0; i < cachep->num; i++) {
-               void *objp = slabp->s_mem+cachep->objsize*i;
+               void *objp = slabp->s_mem + cachep->objsize * i;
 #if DEBUG
                /* need to poison the objs? */
                if (cachep->flags & SLAB_POISON)
@@ -2116,25 +2132,28 @@ static void cache_init_objs(kmem_cache_t *cachep,
                 * Otherwise, deadlock. They must also be threaded.
                 */
                if (cachep->ctor && !(cachep->flags & SLAB_POISON))
-                       cachep->ctor(objp+obj_dbghead(cachep), cachep, ctor_flags);
+                       cachep->ctor(objp + obj_dbghead(cachep), cachep,
+                                    ctor_flags);
 
                if (cachep->flags & SLAB_RED_ZONE) {
                        if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
                                slab_error(cachep, "constructor overwrote the"
-                                                       " end of an object");
+                                          " end of an object");
                        if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
                                slab_error(cachep, "constructor overwrote the"
-                                                       " start of an object");
+                                          " start of an object");
                }
-               if ((cachep->objsize % PAGE_SIZE) == 0 && OFF_SLAB(cachep) && cachep->flags & SLAB_POISON)
-                       kernel_map_pages(virt_to_page(objp), cachep->objsize/PAGE_SIZE, 0);
+               if ((cachep->objsize % PAGE_SIZE) == 0 && OFF_SLAB(cachep)
+                   && cachep->flags & SLAB_POISON)
+                       kernel_map_pages(virt_to_page(objp),
+                                        cachep->objsize / PAGE_SIZE, 0);
 #else
                if (cachep->ctor)
                        cachep->ctor(objp, cachep, ctor_flags);
 #endif
-               slab_bufctl(slabp)[i] = i+1;
+               slab_bufctl(slabp)[i] = i + 1;
        }
-       slab_bufctl(slabp)[i-1] = BUFCTL_END;
+       slab_bufctl(slabp)[i - 1] = BUFCTL_END;
        slabp->free = 0;
 }
 
@@ -2170,17 +2189,17 @@ static void set_slab_attr(kmem_cache_t *cachep, struct slab *slabp, void *objp)
  */
 static int cache_grow(kmem_cache_t *cachep, gfp_t flags, int nodeid)
 {
-       struct slab     *slabp;
-       void            *objp;
-       size_t           offset;
-       gfp_t            local_flags;
-       unsigned long    ctor_flags;
+       struct slab *slabp;
+       void *objp;
+       size_t offset;
+       gfp_t local_flags;
+       unsigned long ctor_flags;
        struct kmem_list3 *l3;
 
        /* Be lazy and only check for valid flags here,
-        * keeping it out of the critical path in kmem_cache_alloc().
+        * keeping it out of the critical path in kmem_cache_alloc().
         */
-       if (flags & ~(SLAB_DMA|SLAB_LEVEL_MASK|SLAB_NO_GROW))
+       if (flags & ~(SLAB_DMA | SLAB_LEVEL_MASK | SLAB_NO_GROW))
                BUG();
        if (flags & SLAB_NO_GROW)
                return 0;
@@ -2246,9 +2265,9 @@ static int cache_grow(kmem_cache_t *cachep, gfp_t flags, int nodeid)
        l3->free_objects += cachep->num;
        spin_unlock(&l3->list_lock);
        return 1;
-opps1:
+      opps1:
        kmem_freepages(cachep, objp);
-failed:
+      failed:
        if (local_flags & __GFP_WAIT)
                local_irq_disable();
        return 0;
@@ -2268,18 +2287,19 @@ static void kfree_debugcheck(const void *objp)
 
        if (!virt_addr_valid(objp)) {
                printk(KERN_ERR "kfree_debugcheck: out of range ptr %lxh.\n",
-                       (unsigned long)objp);   
-               BUG();  
+                      (unsigned long)objp);
+               BUG();
        }
        page = virt_to_page(objp);
        if (!PageSlab(page)) {
-               printk(KERN_ERR "kfree_debugcheck: bad ptr %lxh.\n", (unsigned long)objp);
+               printk(KERN_ERR "kfree_debugcheck: bad ptr %lxh.\n",
+                      (unsigned long)objp);
                BUG();
        }
 }
 
 static void *cache_free_debugcheck(kmem_cache_t *cachep, void *objp,
-                                       void *caller)
+                                  void *caller)
 {
        struct page *page;
        unsigned int objnr;
@@ -2290,20 +2310,26 @@ static void *cache_free_debugcheck(kmem_cache_t *cachep, void *objp,
        page = virt_to_page(objp);
 
        if (page_get_cache(page) != cachep) {
-               printk(KERN_ERR "mismatch in kmem_cache_free: expected cache %p, got %p\n",
-                               page_get_cache(page),cachep);
+               printk(KERN_ERR
+                      "mismatch in kmem_cache_free: expected cache %p, got %p\n",
+                      page_get_cache(page), cachep);
                printk(KERN_ERR "%p is %s.\n", cachep, cachep->name);
-               printk(KERN_ERR "%p is %s.\n", page_get_cache(page), page_get_cache(page)->name);
+               printk(KERN_ERR "%p is %s.\n", page_get_cache(page),
+                      page_get_cache(page)->name);
                WARN_ON(1);
        }
        slabp = page_get_slab(page);
 
        if (cachep->flags & SLAB_RED_ZONE) {
-               if (*dbg_redzone1(cachep, objp) != RED_ACTIVE || *dbg_redzone2(cachep, objp) != RED_ACTIVE) {
-                       slab_error(cachep, "double free, or memory outside"
-                                               " object was overwritten");
-                       printk(KERN_ERR "%p: redzone 1: 0x%lx, redzone 2: 0x%lx.\n",
-                                       objp, *dbg_redzone1(cachep, objp), *dbg_redzone2(cachep, objp));
+               if (*dbg_redzone1(cachep, objp) != RED_ACTIVE
+                   || *dbg_redzone2(cachep, objp) != RED_ACTIVE) {
+                       slab_error(cachep,
+                                  "double free, or memory outside"
+                                  " object was overwritten");
+                       printk(KERN_ERR
+                              "%p: redzone 1: 0x%lx, redzone 2: 0x%lx.\n",
+                              objp, *dbg_redzone1(cachep, objp),
+                              *dbg_redzone2(cachep, objp));
                }
                *dbg_redzone1(cachep, objp) = RED_INACTIVE;
                *dbg_redzone2(cachep, objp) = RED_INACTIVE;
@@ -2311,30 +2337,31 @@ static void *cache_free_debugcheck(kmem_cache_t *cachep, void *objp,
        if (cachep->flags & SLAB_STORE_USER)
                *dbg_userword(cachep, objp) = caller;
 
-       objnr = (objp-slabp->s_mem)/cachep->objsize;
+       objnr = (objp - slabp->s_mem) / cachep->objsize;
 
        BUG_ON(objnr >= cachep->num);
-       BUG_ON(objp != slabp->s_mem + objnr*cachep->objsize);
+       BUG_ON(objp != slabp->s_mem + objnr * cachep->objsize);
 
        if (cachep->flags & SLAB_DEBUG_INITIAL) {
                /* Need to call the slab's constructor so the
                 * caller can perform a verify of its state (debugging).
                 * Called without the cache-lock held.
                 */
-               cachep->ctor(objp+obj_dbghead(cachep),
-                                       cachep, SLAB_CTOR_CONSTRUCTOR|SLAB_CTOR_VERIFY);
+               cachep->ctor(objp + obj_dbghead(cachep),
+                            cachep, SLAB_CTOR_CONSTRUCTOR | SLAB_CTOR_VERIFY);
        }
        if (cachep->flags & SLAB_POISON && cachep->dtor) {
                /* we want to cache poison the object,
                 * call the destruction callback
                 */
-               cachep->dtor(objp+obj_dbghead(cachep), cachep, 0);
+               cachep->dtor(objp + obj_dbghead(cachep), cachep, 0);
        }
        if (cachep->flags & SLAB_POISON) {
 #ifdef CONFIG_DEBUG_PAGEALLOC
                if ((cachep->objsize % PAGE_SIZE) == 0 && OFF_SLAB(cachep)) {
                        store_stackinfo(cachep, objp, (unsigned long)caller);
-                       kernel_map_pages(virt_to_page(objp), cachep->objsize/PAGE_SIZE, 0);
+                       kernel_map_pages(virt_to_page(objp),
+                                        cachep->objsize / PAGE_SIZE, 0);
                } else {
                        poison_obj(cachep, objp, POISON_FREE);
                }
@@ -2349,7 +2376,7 @@ static void check_slabp(kmem_cache_t *cachep, struct slab *slabp)
 {
        kmem_bufctl_t i;
        int entries = 0;
-       
+
        /* Check slab's freelist to see if this obj is there. */
        for (i = slabp->free; i != BUFCTL_END; i = slab_bufctl(slabp)[i]) {
                entries++;
@@ -2357,13 +2384,16 @@ static void check_slabp(kmem_cache_t *cachep, struct slab *slabp)
                        goto bad;
        }
        if (entries != cachep->num - slabp->inuse) {
-bad:
-               printk(KERN_ERR "slab: Internal list corruption detected in cache '%s'(%d), slabp %p(%d). Hexdump:\n",
-                               cachep->name, cachep->num, slabp, slabp->inuse);
-               for (i=0;i<sizeof(slabp)+cachep->num*sizeof(kmem_bufctl_t);i++) {
-                       if ((i%16)==0)
+             bad:
+               printk(KERN_ERR
+                      "slab: Internal list corruption detected in cache '%s'(%d), slabp %p(%d). Hexdump:\n",
+                      cachep->name, cachep->num, slabp, slabp->inuse);
+               for (i = 0;
+                    i < sizeof(slabp) + cachep->num * sizeof(kmem_bufctl_t);
+                    i++) {
+                       if ((i % 16) == 0)
                                printk("\n%03x:", i);
-                       printk(" %02x", ((unsigned char*)slabp)[i]);
+                       printk(" %02x", ((unsigned char *)slabp)[i]);
                }
                printk("\n");
                BUG();
@@ -2383,7 +2413,7 @@ static void *cache_alloc_refill(kmem_cache_t *cachep, gfp_t flags)
 
        check_irq_off();
        ac = ac_data(cachep);
-retry:
+      retry:
        batchcount = ac->batchcount;
        if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
                /* if there was little recent activity on this
@@ -2405,8 +2435,8 @@ retry:
                        shared_array->avail -= batchcount;
                        ac->avail = batchcount;
                        memcpy(ac->entry,
-                               &(shared_array->entry[shared_array->avail]),
-                               sizeof(void*)*batchcount);
+                              &(shared_array->entry[shared_array->avail]),
+                              sizeof(void *) * batchcount);
                        shared_array->touched = 1;
                        goto alloc_done;
                }
@@ -2434,7 +2464,7 @@ retry:
 
                        /* get obj pointer */
                        ac->entry[ac->avail++] = slabp->s_mem +
-                               slabp->free*cachep->objsize;
+                           slabp->free * cachep->objsize;
 
                        slabp->inuse++;
                        next = slab_bufctl(slabp)[slabp->free];
@@ -2442,7 +2472,7 @@ retry:
                        slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE;
                        WARN_ON(numa_node_id() != slabp->nodeid);
 #endif
-                       slabp->free = next;
+                       slabp->free = next;
                }
                check_slabp(cachep, slabp);
 
@@ -2454,9 +2484,9 @@ retry:
                        list_add(&slabp->list, &l3->slabs_partial);
        }
 
-must_grow:
+      must_grow:
        l3->free_objects -= ac->avail;
-alloc_done:
+      alloc_done:
        spin_unlock(&l3->list_lock);
 
        if (unlikely(!ac->avail)) {
@@ -2468,7 +2498,7 @@ alloc_done:
                if (!x && ac->avail == 0)       // no objects in sight? abort
                        return NULL;
 
-               if (!ac->avail)         // objects refilled by interrupt?
+               if (!ac->avail) // objects refilled by interrupt?
                        goto retry;
        }
        ac->touched = 1;
@@ -2485,16 +2515,16 @@ cache_alloc_debugcheck_before(kmem_cache_t *cachep, gfp_t flags)
 }
 
 #if DEBUG
-static void *
-cache_alloc_debugcheck_after(kmem_cache_t *cachep,
-                       gfp_t flags, void *objp, void *caller)
+static void *cache_alloc_debugcheck_after(kmem_cache_t *cachep, gfp_t flags,
+                                       void *objp, void *caller)
 {
-       if (!objp)      
+       if (!objp)
                return objp;
-       if (cachep->flags & SLAB_POISON) {
+       if (cachep->flags & SLAB_POISON) {
 #ifdef CONFIG_DEBUG_PAGEALLOC
                if ((cachep->objsize % PAGE_SIZE) == 0 && OFF_SLAB(cachep))
-                       kernel_map_pages(virt_to_page(objp), cachep->objsize/PAGE_SIZE, 1);
+                       kernel_map_pages(virt_to_page(objp),
+                                        cachep->objsize / PAGE_SIZE, 1);
                else
                        check_poison_obj(cachep, objp);
 #else
@@ -2506,24 +2536,28 @@ cache_alloc_debugcheck_after(kmem_cache_t *cachep,
                *dbg_userword(cachep, objp) = caller;
 
        if (cachep->flags & SLAB_RED_ZONE) {
-               if (*dbg_redzone1(cachep, objp) != RED_INACTIVE || *dbg_redzone2(cachep, objp) != RED_INACTIVE) {
-                       slab_error(cachep, "double free, or memory outside"
-                                               " object was overwritten");
-                       printk(KERN_ERR "%p: redzone 1: 0x%lx, redzone 2: 0x%lx.\n",
-                                       objp, *dbg_redzone1(cachep, objp), *dbg_redzone2(cachep, objp));
+               if (*dbg_redzone1(cachep, objp) != RED_INACTIVE
+                   || *dbg_redzone2(cachep, objp) != RED_INACTIVE) {
+                       slab_error(cachep,
+                                  "double free, or memory outside"
+                                  " object was overwritten");
+                       printk(KERN_ERR
+                              "%p: redzone 1: 0x%lx, redzone 2: 0x%lx.\n",
+                              objp, *dbg_redzone1(cachep, objp),
+                              *dbg_redzone2(cachep, objp));
                }
                *dbg_redzone1(cachep, objp) = RED_ACTIVE;
                *dbg_redzone2(cachep, objp) = RED_ACTIVE;
        }
        objp += obj_dbghead(cachep);
        if (cachep->ctor && cachep->flags & SLAB_POISON) {
-               unsigned long   ctor_flags = SLAB_CTOR_CONSTRUCTOR;
+               unsigned long ctor_flags = SLAB_CTOR_CONSTRUCTOR;
 
                if (!(flags & __GFP_WAIT))
                        ctor_flags |= SLAB_CTOR_ATOMIC;
 
                cachep->ctor(objp, cachep, ctor_flags);
-       }       
+       }
        return objp;
 }
 #else
@@ -2532,7 +2566,7 @@ cache_alloc_debugcheck_after(kmem_cache_t *cachep,
 
 static inline void *____cache_alloc(kmem_cache_t *cachep, gfp_t flags)
 {
-       void* objp;
+       void *objp;
        struct array_cache *ac;
 
        check_irq_off();
@@ -2551,7 +2585,7 @@ static inline void *____cache_alloc(kmem_cache_t *cachep, gfp_t flags)
 static inline void *__cache_alloc(kmem_cache_t *cachep, gfp_t flags)
 {
        unsigned long save_flags;
-       void* objp;
+       void *objp;
 
        cache_alloc_debugcheck_before(cachep, flags);
 
@@ -2559,7 +2593,7 @@ static inline void *__cache_alloc(kmem_cache_t *cachep, gfp_t flags)
        objp = ____cache_alloc(cachep, flags);
        local_irq_restore(save_flags);
        objp = cache_alloc_debugcheck_after(cachep, flags, objp,
-                                       __builtin_return_address(0));
+                                           __builtin_return_address(0));
        prefetchw(objp);
        return objp;
 }
@@ -2571,74 +2605,75 @@ static inline void *__cache_alloc(kmem_cache_t *cachep, gfp_t flags)
 static void *__cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int nodeid)
 {
        struct list_head *entry;
-       struct slab *slabp;
-       struct kmem_list3 *l3;
-       void *obj;
-       kmem_bufctl_t next;
-       int x;
-
-       l3 = cachep->nodelists[nodeid];
-       BUG_ON(!l3);
-
-retry:
-       spin_lock(&l3->list_lock);
-       entry = l3->slabs_partial.next;
-       if (entry == &l3->slabs_partial) {
-               l3->free_touched = 1;
-               entry = l3->slabs_free.next;
-               if (entry == &l3->slabs_free)
-                       goto must_grow;
-       }
-
-       slabp = list_entry(entry, struct slab, list);
-       check_spinlock_acquired_node(cachep, nodeid);
-       check_slabp(cachep, slabp);
-
-       STATS_INC_NODEALLOCS(cachep);
-       STATS_INC_ACTIVE(cachep);
-       STATS_SET_HIGH(cachep);
-
-       BUG_ON(slabp->inuse == cachep->num);
-
-       /* get obj pointer */
-       obj =  slabp->s_mem + slabp->free*cachep->objsize;
-       slabp->inuse++;
-       next = slab_bufctl(slabp)[slabp->free];
+       struct slab *slabp;
+       struct kmem_list3 *l3;
+       void *obj;
+       kmem_bufctl_t next;
+       int x;
+
+       l3 = cachep->nodelists[nodeid];
+       BUG_ON(!l3);
+
+      retry:
+       spin_lock(&l3->list_lock);
+       entry = l3->slabs_partial.next;
+       if (entry == &l3->slabs_partial) {
+               l3->free_touched = 1;
+               entry = l3->slabs_free.next;
+               if (entry == &l3->slabs_free)
+                       goto must_grow;
+       }
+
+       slabp = list_entry(entry, struct slab, list);
+       check_spinlock_acquired_node(cachep, nodeid);
+       check_slabp(cachep, slabp);
+
+       STATS_INC_NODEALLOCS(cachep);
+       STATS_INC_ACTIVE(cachep);
+       STATS_SET_HIGH(cachep);
+
+       BUG_ON(slabp->inuse == cachep->num);
+
+       /* get obj pointer */
+       obj = slabp->s_mem + slabp->free * cachep->objsize;
+       slabp->inuse++;
+       next = slab_bufctl(slabp)[slabp->free];
 #if DEBUG
-       slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE;
+       slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE;
 #endif
-       slabp->free = next;
-       check_slabp(cachep, slabp);
-       l3->free_objects--;
-       /* move slabp to correct slabp list: */
-       list_del(&slabp->list);
-
-       if (slabp->free == BUFCTL_END) {
-               list_add(&slabp->list, &l3->slabs_full);
-       } else {
-               list_add(&slabp->list, &l3->slabs_partial);
-       }
+       slabp->free = next;
+       check_slabp(cachep, slabp);
+       l3->free_objects--;
+       /* move slabp to correct slabp list: */
+       list_del(&slabp->list);
+
+       if (slabp->free == BUFCTL_END) {
+               list_add(&slabp->list, &l3->slabs_full);
+       } else {
+               list_add(&slabp->list, &l3->slabs_partial);
+       }
 
-       spin_unlock(&l3->list_lock);
-       goto done;
+       spin_unlock(&l3->list_lock);
+       goto done;
 
-must_grow:
-       spin_unlock(&l3->list_lock);
-       x = cache_grow(cachep, flags, nodeid);
+      must_grow:
+       spin_unlock(&l3->list_lock);
+       x = cache_grow(cachep, flags, nodeid);
 
-       if (!x)
-               return NULL;
+       if (!x)
+               return NULL;
 
-       goto retry;
-done:
-       return obj;
+       goto retry;
+      done:
+       return obj;
 }
 #endif
 
 /*
  * Caller needs to acquire correct kmem_list's list_lock
  */
-static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects, int node)
+static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects,
+                      int node)
 {
        int i;
        struct kmem_list3 *l3;
@@ -2661,7 +2696,7 @@ static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects, int n
 
                if (slab_bufctl(slabp)[objnr] != BUFCTL_FREE) {
                        printk(KERN_ERR "slab: double free detected in cache "
-                                       "'%s', objp %p\n", cachep->name, objp);
+                              "'%s', objp %p\n", cachep->name, objp);
                        BUG();
                }
 #endif
@@ -2705,20 +2740,19 @@ static void cache_flusharray(kmem_cache_t *cachep, struct array_cache *ac)
        spin_lock(&l3->list_lock);
        if (l3->shared) {
                struct array_cache *shared_array = l3->shared;
-               int max = shared_array->limit-shared_array->avail;
+               int max = shared_array->limit - shared_array->avail;
                if (max) {
                        if (batchcount > max)
                                batchcount = max;
                        memcpy(&(shared_array->entry[shared_array->avail]),
-                                       ac->entry,
-                                       sizeof(void*)*batchcount);
+                              ac->entry, sizeof(void *) * batchcount);
                        shared_array->avail += batchcount;
                        goto free_done;
                }
        }
 
        free_block(cachep, ac->entry, batchcount, node);
-free_done:
+      free_done:
 #if STATS
        {
                int i = 0;
@@ -2740,10 +2774,9 @@ free_done:
        spin_unlock(&l3->list_lock);
        ac->avail -= batchcount;
        memmove(ac->entry, &(ac->entry[batchcount]),
-                       sizeof(void*)*ac->avail);
+               sizeof(void *) * ac->avail);
 }
 
-
 /*
  * __cache_free
  * Release an obj back to its cache. If the obj has a constructed
@@ -2768,7 +2801,8 @@ static inline void __cache_free(kmem_cache_t *cachep, void *objp)
                if (unlikely(slabp->nodeid != numa_node_id())) {
                        struct array_cache *alien = NULL;
                        int nodeid = slabp->nodeid;
-                       struct kmem_list3 *l3 = cachep->nodelists[numa_node_id()];
+                       struct kmem_list3 *l3 =
+                           cachep->nodelists[numa_node_id()];
 
                        STATS_INC_NODEFREES(cachep);
                        if (l3->alien && l3->alien[nodeid]) {
@@ -2776,15 +2810,15 @@ static inline void __cache_free(kmem_cache_t *cachep, void *objp)
                                spin_lock(&alien->lock);
                                if (unlikely(alien->avail == alien->limit))
                                        __drain_alien_cache(cachep,
-                                                       alien, nodeid);
+                                                           alien, nodeid);
                                alien->entry[alien->avail++] = objp;
                                spin_unlock(&alien->lock);
                        } else {
                                spin_lock(&(cachep->nodelists[nodeid])->
-                                               list_lock);
+                                         list_lock);
                                free_block(cachep, &objp, 1, nodeid);
                                spin_unlock(&(cachep->nodelists[nodeid])->
-                                               list_lock);
+                                           list_lock);
                        }
                        return;
                }
@@ -2831,9 +2865,9 @@ EXPORT_SYMBOL(kmem_cache_alloc);
  */
 int fastcall kmem_ptr_validate(kmem_cache_t *cachep, void *ptr)
 {
-       unsigned long addr = (unsigned long) ptr;
+       unsigned long addr = (unsigned long)ptr;
        unsigned long min_addr = PAGE_OFFSET;
-       unsigned long align_mask = BYTES_PER_WORD-1;
+       unsigned long align_mask = BYTES_PER_WORD - 1;
        unsigned long size = cachep->objsize;
        struct page *page;
 
@@ -2853,7 +2887,7 @@ int fastcall kmem_ptr_validate(kmem_cache_t *cachep, void *ptr)
        if (unlikely(page_get_cache(page) != cachep))
                goto out;
        return 1;
-out:
+      out:
        return 0;
 }
 
@@ -2880,8 +2914,10 @@ void *kmem_cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int nodeid)
 
        if (unlikely(!cachep->nodelists[nodeid])) {
                /* Fall back to __cache_alloc if we run into trouble */
-               printk(KERN_WARNING "slab: not allocating in inactive node %d for cache %s\n", nodeid, cachep->name);
-               return __cache_alloc(cachep,flags);
+               printk(KERN_WARNING
+                      "slab: not allocating in inactive node %d for cache %s\n",
+                      nodeid, cachep->name);
+               return __cache_alloc(cachep, flags);
        }
 
        cache_alloc_debugcheck_before(cachep, flags);
@@ -2891,7 +2927,9 @@ void *kmem_cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int nodeid)
        else
                ptr = __cache_alloc_node(cachep, flags, nodeid);
        local_irq_restore(save_flags);
-       ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, __builtin_return_address(0));
+       ptr =
+           cache_alloc_debugcheck_after(cachep, flags, ptr,
+                                        __builtin_return_address(0));
 
        return ptr;
 }
@@ -2957,7 +2995,7 @@ EXPORT_SYMBOL(__kmalloc);
 void *__alloc_percpu(size_t size)
 {
        int i;
-       struct percpu_data *pdata = kmalloc(sizeof (*pdata), GFP_KERNEL);
+       struct percpu_data *pdata = kmalloc(sizeof(*pdata), GFP_KERNEL);
 
        if (!pdata)
                return NULL;
@@ -2981,9 +3019,9 @@ void *__alloc_percpu(size_t size)
        }
 
        /* Catch derefs w/o wrappers */
-       return (void *) (~(unsigned long) pdata);
+       return (void *)(~(unsigned long)pdata);
 
-unwind_oom:
+      unwind_oom:
        while (--i >= 0) {
                if (!cpu_possible(i))
                        continue;
@@ -3046,7 +3084,7 @@ void kfree(const void *objp)
        local_irq_save(flags);
        kfree_debugcheck(objp);
        c = page_get_cache(virt_to_page(objp));
-       __cache_free(c, (void*)objp);
+       __cache_free(c, (void *)objp);
        local_irq_restore(flags);
 }
 EXPORT_SYMBOL(kfree);
@@ -3059,17 +3097,16 @@ EXPORT_SYMBOL(kfree);
  * Don't free memory not originally allocated by alloc_percpu()
  * The complemented objp is to check for that.
  */
-void
-free_percpu(const void *objp)
+void free_percpu(const void *objp)
 {
        int i;
-       struct percpu_data *p = (struct percpu_data *) (~(unsigned long) objp);
+       struct percpu_data *p = (struct percpu_data *)(~(unsigned long)objp);
 
        /*
         * We allocate for all cpus so we cannot use for online cpu here.
         */
        for_each_cpu(i)
-               kfree(p->ptrs[i]);
+           kfree(p->ptrs[i]);
        kfree(p);
 }
 EXPORT_SYMBOL(free_percpu);
@@ -3103,44 +3140,44 @@ static int alloc_kmemlist(kmem_cache_t *cachep)
                if (!(new_alien = alloc_alien_cache(node, cachep->limit)))
                        goto fail;
 #endif
-               if (!(new = alloc_arraycache(node, (cachep->shared*
-                               cachep->batchcount), 0xbaadf00d)))
+               if (!(new = alloc_arraycache(node, (cachep->shared *
+                                                   cachep->batchcount),
+                                            0xbaadf00d)))
                        goto fail;
                if ((l3 = cachep->nodelists[node])) {
 
                        spin_lock_irq(&l3->list_lock);
 
                        if ((nc = cachep->nodelists[node]->shared))
-                               free_block(cachep, nc->entry,
-                                                       nc->avail, node);
+                               free_block(cachep, nc->entry, nc->avail, node);
 
                        l3->shared = new;
                        if (!cachep->nodelists[node]->alien) {
                                l3->alien = new_alien;
                                new_alien = NULL;
                        }
-                       l3->free_limit = (1 + nr_cpus_node(node))*
-                               cachep->batchcount + cachep->num;
+                       l3->free_limit = (1 + nr_cpus_node(node)) *
+                           cachep->batchcount + cachep->num;
                        spin_unlock_irq(&l3->list_lock);
                        kfree(nc);
                        free_alien_cache(new_alien);
                        continue;
                }
                if (!(l3 = kmalloc_node(sizeof(struct kmem_list3),
-                                               GFP_KERNEL, node)))
+                                       GFP_KERNEL, node)))
                        goto fail;
 
                kmem_list3_init(l3);
                l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
-                       ((unsigned long)cachep)%REAPTIMEOUT_LIST3;
+                   ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
                l3->shared = new;
                l3->alien = new_alien;
-               l3->free_limit = (1 + nr_cpus_node(node))*
-                       cachep->batchcount + cachep->num;
+               l3->free_limit = (1 + nr_cpus_node(node)) *
+                   cachep->batchcount + cachep->num;
                cachep->nodelists[node] = l3;
        }
        return err;
-fail:
+      fail:
        err = -ENOMEM;
        return err;
 }
@@ -3162,18 +3199,19 @@ static void do_ccupdate_local(void *info)
        new->new[smp_processor_id()] = old;
 }
 
-
 static int do_tune_cpucache(kmem_cache_t *cachep, int limit, int batchcount,
-                               int shared)
+                           int shared)
 {
        struct ccupdate_struct new;
        int i, err;
 
-       memset(&new.new,0,sizeof(new.new));
+       memset(&new.new, 0, sizeof(new.new));
        for_each_online_cpu(i) {
-               new.new[i] = alloc_arraycache(cpu_to_node(i), limit, batchcount);
+               new.new[i] =
+                   alloc_arraycache(cpu_to_node(i), limit, batchcount);
                if (!new.new[i]) {
-                       for (i--; i >= 0; i--) kfree(new.new[i]);
+                       for (i--; i >= 0; i--)
+                               kfree(new.new[i]);
                        return -ENOMEM;
                }
        }
@@ -3201,13 +3239,12 @@ static int do_tune_cpucache(kmem_cache_t *cachep, int limit, int batchcount,
        err = alloc_kmemlist(cachep);
        if (err) {
                printk(KERN_ERR "alloc_kmemlist failed for %s, error %d.\n",
-                               cachep->name, -err);
+                      cachep->name, -err);
                BUG();
        }
        return 0;
 }
 
-
 static void enable_cpucache(kmem_cache_t *cachep)
 {
        int err;
@@ -3254,14 +3291,14 @@ static void enable_cpucache(kmem_cache_t *cachep)
        if (limit > 32)
                limit = 32;
 #endif
-       err = do_tune_cpucache(cachep, limit, (limit+1)/2, shared);
+       err = do_tune_cpucache(cachep, limit, (limit + 1) / 2, shared);
        if (err)
                printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n",
-                                       cachep->name, -err);
+                      cachep->name, -err);
 }
 
-static void drain_array_locked(kmem_cache_t *cachep,
-                               struct array_cache *ac, int force, int node)
+static void drain_array_locked(kmem_cache_t *cachep, struct array_cache *ac,
+                               int force, int node)
 {
        int tofree;
 
@@ -3269,14 +3306,14 @@ static void drain_array_locked(kmem_cache_t *cachep,
        if (ac->touched && !force) {
                ac->touched = 0;
        } else if (ac->avail) {
-               tofree = force ? ac->avail : (ac->limit+4)/5;
+               tofree = force ? ac->avail : (ac->limit + 4) / 5;
                if (tofree > ac->avail) {
-                       tofree = (ac->avail+1)/2;
+                       tofree = (ac->avail + 1) / 2;
                }
                free_block(cachep, ac->entry, tofree, node);
                ac->avail -= tofree;
                memmove(ac->entry, &(ac->entry[tofree]),
-                                       sizeof(void*)*ac->avail);
+                       sizeof(void *) * ac->avail);
        }
 }
 
@@ -3299,13 +3336,14 @@ static void cache_reap(void *unused)
 
        if (down_trylock(&cache_chain_sem)) {
                /* Give up. Setup the next iteration. */
-               schedule_delayed_work(&__get_cpu_var(reap_work), REAPTIMEOUT_CPUC);
+               schedule_delayed_work(&__get_cpu_var(reap_work),
+                                     REAPTIMEOUT_CPUC);
                return;
        }
 
        list_for_each(walk, &cache_chain) {
                kmem_cache_t *searchp;
-               struct list_head* p;
+               struct list_head *p;
                int tofree;
                struct slab *slabp;
 
@@ -3322,7 +3360,7 @@ static void cache_reap(void *unused)
                spin_lock_irq(&l3->list_lock);
 
                drain_array_locked(searchp, ac_data(searchp), 0,
-                               numa_node_id());
+                                  numa_node_id());
 
                if (time_after(l3->next_reap, jiffies))
                        goto next_unlock;
@@ -3331,14 +3369,16 @@ static void cache_reap(void *unused)
 
                if (l3->shared)
                        drain_array_locked(searchp, l3->shared, 0,
-                               numa_node_id());
+                                          numa_node_id());
 
                if (l3->free_touched) {
                        l3->free_touched = 0;
                        goto next_unlock;
                }
 
-               tofree = (l3->free_limit+5*searchp->num-1)/(5*searchp->num);
+               tofree =
+                   (l3->free_limit + 5 * searchp->num -
+                    1) / (5 * searchp->num);
                do {
                        p = l3->slabs_free.next;
                        if (p == &(l3->slabs_free))
@@ -3358,10 +3398,10 @@ static void cache_reap(void *unused)
                        spin_unlock_irq(&l3->list_lock);
                        slab_destroy(searchp, slabp);
                        spin_lock_irq(&l3->list_lock);
-               } while(--tofree > 0);
-next_unlock:
+               } while (--tofree > 0);
+             next_unlock:
                spin_unlock_irq(&l3->list_lock);
-next:
+             next:
                cond_resched();
        }
        check_irq_on();
@@ -3418,7 +3458,7 @@ static void *s_next(struct seq_file *m, void *p, loff_t *pos)
        kmem_cache_t *cachep = p;
        ++*pos;
        return cachep->next.next == &cache_chain ? NULL
-               : list_entry(cachep->next.next, kmem_cache_t, next);
+           : list_entry(cachep->next.next, kmem_cache_t, next);
 }
 
 static void s_stop(struct seq_file *m, void *p)
@@ -3430,11 +3470,11 @@ static int s_show(struct seq_file *m, void *p)
 {
        kmem_cache_t *cachep = p;
        struct list_head *q;
-       struct slab     *slabp;
-       unsigned long   active_objs;
-       unsigned long   num_objs;
-       unsigned long   active_slabs = 0;
-       unsigned long   num_slabs, free_objects = 0, shared_avail = 0;
+       struct slab *slabp;
+       unsigned long active_objs;
+       unsigned long num_objs;
+       unsigned long active_slabs = 0;
+       unsigned long num_slabs, free_objects = 0, shared_avail = 0;
        const char *name;
        char *error = NULL;
        int node;
@@ -3451,14 +3491,14 @@ static int s_show(struct seq_file *m, void *p)
 
                spin_lock(&l3->list_lock);
 
-               list_for_each(q,&l3->slabs_full) {
+               list_for_each(q, &l3->slabs_full) {
                        slabp = list_entry(q, struct slab, list);
                        if (slabp->inuse != cachep->num && !error)
                                error = "slabs_full accounting error";
                        active_objs += cachep->num;
                        active_slabs++;
                }
-               list_for_each(q,&l3->slabs_partial) {
+               list_for_each(q, &l3->slabs_partial) {
                        slabp = list_entry(q, struct slab, list);
                        if (slabp->inuse == cachep->num && !error)
                                error = "slabs_partial inuse accounting error";
@@ -3467,7 +3507,7 @@ static int s_show(struct seq_file *m, void *p)
                        active_objs += slabp->inuse;
                        active_slabs++;
                }
-               list_for_each(q,&l3->slabs_free) {
+               list_for_each(q, &l3->slabs_free) {
                        slabp = list_entry(q, struct slab, list);
                        if (slabp->inuse && !error)
                                error = "slabs_free/inuse accounting error";
@@ -3478,25 +3518,24 @@ static int s_show(struct seq_file *m, void *p)
 
                spin_unlock(&l3->list_lock);
        }
-       num_slabs+=active_slabs;
-       num_objs = num_slabs*cachep->num;
+       num_slabs += active_slabs;
+       num_objs = num_slabs * cachep->num;
        if (num_objs - active_objs != free_objects && !error)
                error = "free_objects accounting error";
 
-       name = cachep->name; 
+       name = cachep->name;
        if (error)
                printk(KERN_ERR "slab: cache %s error: %s\n", name, error);
 
        seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
-               name, active_objs, num_objs, cachep->objsize,
-               cachep->num, (1<<cachep->gfporder));
+                  name, active_objs, num_objs, cachep->objsize,
+                  cachep->num, (1 << cachep->gfporder));
        seq_printf(m, " : tunables %4u %4u %4u",
-                       cachep->limit, cachep->batchcount,
-                       cachep->shared);
+                  cachep->limit, cachep->batchcount, cachep->shared);
        seq_printf(m, " : slabdata %6lu %6lu %6lu",
-                       active_slabs, num_slabs, shared_avail);
+                  active_slabs, num_slabs, shared_avail);
 #if STATS
-       {       /* list3 stats */
+       {                       /* list3 stats */
                unsigned long high = cachep->high_mark;
                unsigned long allocs = cachep->num_allocations;
                unsigned long grown = cachep->grown;
@@ -3507,9 +3546,7 @@ static int s_show(struct seq_file *m, void *p)
                unsigned long node_frees = cachep->node_frees;
 
                seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu \
-                               %4lu %4lu %4lu %4lu",
-                               allocs, high, grown, reaped, errors,
-                               max_freeable, node_allocs, node_frees);
+                               %4lu %4lu %4lu %4lu", allocs, high, grown, reaped, errors, max_freeable, node_allocs, node_frees);
        }
        /* cpu stats */
        {
@@ -3519,7 +3556,7 @@ static int s_show(struct seq_file *m, void *p)
                unsigned long freemiss = atomic_read(&cachep->freemiss);
 
                seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
-                       allochit, allocmiss, freehit, freemiss);
+                          allochit, allocmiss, freehit, freemiss);
        }
 #endif
        seq_putc(m, '\n');
@@ -3542,10 +3579,10 @@ static int s_show(struct seq_file *m, void *p)
  */
 
 struct seq_operations slabinfo_op = {
-       .start  = s_start,
-       .next   = s_next,
-       .stop   = s_stop,
-       .show   = s_show,
+       .start = s_start,
+       .next = s_next,
+       .stop = s_stop,
+       .show = s_show,
 };
 
 #define MAX_SLABINFO_WRITE 128
@@ -3556,18 +3593,18 @@ struct seq_operations slabinfo_op = {
  * @count: data length
  * @ppos: unused
  */
-ssize_t slabinfo_write(struct file *file, const char __user *buffer,
-                               size_t count, loff_t *ppos)
+ssize_t slabinfo_write(struct file *file, const char __user * buffer,
+                      size_t count, loff_t *ppos)
 {
-       char kbuf[MAX_SLABINFO_WRITE+1], *tmp;
+       char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
        int limit, batchcount, shared, res;
        struct list_head *p;
-       
+
        if (count > MAX_SLABINFO_WRITE)
                return -EINVAL;
        if (copy_from_user(&kbuf, buffer, count))
                return -EFAULT;
-       kbuf[MAX_SLABINFO_WRITE] = '\0'; 
+       kbuf[MAX_SLABINFO_WRITE] = '\0';
 
        tmp = strchr(kbuf, ' ');
        if (!tmp)
@@ -3580,18 +3617,17 @@ ssize_t slabinfo_write(struct file *file, const char __user *buffer,
        /* Find the cache in the chain of caches. */
        down(&cache_chain_sem);
        res = -EINVAL;
-       list_for_each(p,&cache_chain) {
+       list_for_each(p, &cache_chain) {
                kmem_cache_t *cachep = list_entry(p, kmem_cache_t, next);
 
                if (!strcmp(cachep->name, kbuf)) {
                        if (limit < 1 ||
                            batchcount < 1 ||
-                           batchcount > limit ||
-                           shared < 0) {
+                           batchcount > limit || shared < 0) {
                                res = 0;
                        } else {
                                res = do_tune_cpucache(cachep, limit,
-                                                       batchcount, shared);
+                                                      batchcount, shared);
                        }
                        break;
                }