diff --git a/mm/slub.c b/mm/slub.c
index 3e614c1..bd2efae 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1577,34 +1577,75 @@ static int slub_nomerge;
  * requested a higher minimum order then we start with that one instead of
  * the smallest order which will fit the object.
  */
-static int calculate_order(int size)
+static inline int slab_order(int size, int min_objects,
+                               int max_order, int fract_leftover)
 {
        int order;
        int rem;
 
-       for (order = max(slub_min_order, fls(size - 1) - PAGE_SHIFT);
-                       order < MAX_ORDER; order++) {
-               unsigned long slab_size = PAGE_SIZE << order;
+       for (order = max(slub_min_order,
+                               fls(min_objects * size - 1) - PAGE_SHIFT);
+                       order <= max_order; order++) {
 
-               if (slub_max_order > order &&
-                               slab_size < slub_min_objects * size)
-                       continue;
+               unsigned long slab_size = PAGE_SIZE << order;
 
-               if (slab_size < size)
+               if (slab_size < min_objects * size)
                        continue;
 
                rem = slab_size % size;
 
-               if (rem <= slab_size / 8)
+               if (rem <= slab_size / fract_leftover)
                        break;
 
        }
-       if (order >= MAX_ORDER)
-               return -E2BIG;
 
        return order;
 }
 
+static inline int calculate_order(int size)
+{
+       int order;
+       int min_objects;
+       int fraction;
+
+       /*
+        * Attempt to find the best configuration for a slab. This
+        * works by first attempting to generate a layout with
+        * the best configuration and backing off gradually.
+        *
+        * First we reduce the acceptable waste in a slab. Then
+        * we reduce the minimum objects required in a slab.
+        */
+       min_objects = slub_min_objects;
+       while (min_objects > 1) {
+               fraction = 8;
+               while (fraction >= 4) {
+                       order = slab_order(size, min_objects,
+                                               slub_max_order, fraction);
+                       if (order <= slub_max_order)
+                               return order;
+                       fraction /= 2;
+               }
+               min_objects /= 2;
+       }
+
+       /*
+        * We were unable to place multiple objects in a slab. Now
+        * let's see if we can place a single object there.
+        */
+       order = slab_order(size, 1, slub_max_order, 1);
+       if (order <= slub_max_order)
+               return order;
+
+       /*
+        * Doh this slab cannot be placed using slub_max_order.
+        */
+       order = slab_order(size, 1, MAX_ORDER, 1);
+       if (order <= MAX_ORDER)
+               return order;
+       return -ENOSYS;
+}
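
For a concrete feel of the arithmetic in the new slab_order()/calculate_order() pair above, here is a minimal userspace sketch of the slab_order() loop (not kernel code; it assumes 4 KiB pages and slub_min_order = 0, and uses a hand-rolled stand-in for fls()). With 720-byte objects, min_objects = 8 and fract_leftover = 8 it settles on order 1: an 8 KiB slab holds 11 objects and leaves 272 bytes over, well under 1/8 of the slab.

/*
 * Userspace sketch of slab_order(): assumes PAGE_SHIFT = 12 and
 * slub_min_order = 0.  Illustration only, not the kernel implementation.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

static int fls_sketch(unsigned long x)	/* stands in for the kernel's fls() */
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

static int slab_order_sketch(int size, int min_objects,
				int max_order, int fract_leftover)
{
	int order = fls_sketch(min_objects * size - 1) - PAGE_SHIFT;

	if (order < 0)		/* stands in for max(slub_min_order, ...) */
		order = 0;

	for (; order <= max_order; order++) {
		unsigned long slab_size = PAGE_SIZE << order;
		unsigned long rem;

		if (slab_size < (unsigned long)min_objects * size)
			continue;

		rem = slab_size % size;
		if (rem <= slab_size / fract_leftover)
			break;
	}
	return order;
}

int main(void)
{
	/* 720-byte objects, at least 8 per slab, at most 1/8 wasted */
	int order = slab_order_sketch(720, 8, 3, 8);

	/* order 1: 8192 % 720 = 272 <= 8192 / 8, so an 8 KiB slab suffices */
	printf("order=%d objects=%lu\n", order, (PAGE_SIZE << order) / 720);
	return 0;
}
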
+
 /*
  * Figure out what the alignment of the objects will be.
  */
@@ -2245,7 +2286,6 @@ EXPORT_SYMBOL(kmem_cache_shrink);
 
 /**
  * krealloc - reallocate memory. The contents will remain unchanged.
- *
  * @p: object to reallocate memory for.
  * @new_size: how many bytes of memory are required.
  * @flags: the type of memory to allocate.
@@ -2474,7 +2514,9 @@ static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
 
        switch (action) {
        case CPU_UP_CANCELED:
+       case CPU_UP_CANCELED_FROZEN:
        case CPU_DEAD:
+       case CPU_DEAD_FROZEN:
                for_all_slabs(__flush_cpu_slab, cpu);
                break;
        default:
@@ -2488,91 +2530,6 @@ static struct notifier_block __cpuinitdata slab_notifier =
 
 #endif
 
-#ifdef CONFIG_NUMA
-
-/*****************************************************************
- * Generic reaper used to support the page allocator
- * (the cpu slabs are reaped by a per slab workqueue).
- *
- * Maybe move this to the page allocator?
- ****************************************************************/
-
-static DEFINE_PER_CPU(unsigned long, reap_node);
-
-static void init_reap_node(int cpu)
-{
-       int node;
-
-       node = next_node(cpu_to_node(cpu), node_online_map);
-       if (node == MAX_NUMNODES)
-               node = first_node(node_online_map);
-
-       __get_cpu_var(reap_node) = node;
-}
-
-static void next_reap_node(void)
-{
-       int node = __get_cpu_var(reap_node);
-
-       /*
-        * Also drain per cpu pages on remote zones
-        */
-       if (node != numa_node_id())
-               drain_node_pages(node);
-
-       node = next_node(node, node_online_map);
-       if (unlikely(node >= MAX_NUMNODES))
-               node = first_node(node_online_map);
-       __get_cpu_var(reap_node) = node;
-}
-#else
-#define init_reap_node(cpu) do { } while (0)
-#define next_reap_node(void) do { } while (0)
-#endif
-
-#define REAPTIMEOUT_CPUC       (2*HZ)
-
-#ifdef CONFIG_SMP
-static DEFINE_PER_CPU(struct delayed_work, reap_work);
-
-static void cache_reap(struct work_struct *unused)
-{
-       next_reap_node();
-       refresh_cpu_vm_stats(smp_processor_id());
-       schedule_delayed_work(&__get_cpu_var(reap_work),
-                                     REAPTIMEOUT_CPUC);
-}
-
-static void __devinit start_cpu_timer(int cpu)
-{
-       struct delayed_work *reap_work = &per_cpu(reap_work, cpu);
-
-       /*
-        * When this gets called from do_initcalls via cpucache_init(),
-        * init_workqueues() has already run, so keventd will be setup
-        * at that time.
-        */
-       if (keventd_up() && reap_work->work.func == NULL) {
-               init_reap_node(cpu);
-               INIT_DELAYED_WORK(reap_work, cache_reap);
-               schedule_delayed_work_on(cpu, reap_work, HZ + 3 * cpu);
-       }
-}
-
-static int __init cpucache_init(void)
-{
-       int cpu;
-
-       /*
-        * Register the timers that drain pcp pages and update vm statistics
-        */
-       for_each_online_cpu(cpu)
-               start_cpu_timer(cpu);
-       return 0;
-}
-__initcall(cpucache_init);
-#endif
-
 void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
 {
        struct kmem_cache *s = get_slab(size, gfpflags);
@@ -2751,6 +2708,13 @@ static void resiliency_test(void) {};
 struct location {
        unsigned long count;
        void *addr;
+       long long sum_time;
+       long min_time;
+       long max_time;
+       long min_pid;
+       long max_pid;
+       cpumask_t cpus;
+       nodemask_t nodes;
 };
 
 struct loc_track {
@@ -2791,11 +2755,12 @@ static int alloc_loc_track(struct loc_track *t, unsigned long max)
 }
 
 static int add_location(struct loc_track *t, struct kmem_cache *s,
-                                               void *addr)
+                               const struct track *track)
 {
        long start, end, pos;
        struct location *l;
        void *caddr;
+       unsigned long age = jiffies - track->when;
 
        start = -1;
        end = t->count;
@@ -2811,12 +2776,29 @@ static int add_location(struct loc_track *t, struct kmem_cache *s,
                        break;
 
                caddr = t->loc[pos].addr;
-               if (addr == caddr) {
-                       t->loc[pos].count++;
+               if (track->addr == caddr) {
+
+                       l = &t->loc[pos];
+                       l->count++;
+                       if (track->when) {
+                               l->sum_time += age;
+                               if (age < l->min_time)
+                                       l->min_time = age;
+                               if (age > l->max_time)
+                                       l->max_time = age;
+
+                               if (track->pid < l->min_pid)
+                                       l->min_pid = track->pid;
+                               if (track->pid > l->max_pid)
+                                       l->max_pid = track->pid;
+
+                               cpu_set(track->cpu, l->cpus);
+                       }
+                       node_set(page_to_nid(virt_to_page(track)), l->nodes);
                        return 1;
                }
 
-               if (addr < caddr)
+               if (track->addr < caddr)
                        end = pos;
                else
                        start = pos;
@@ -2834,7 +2816,16 @@ static int add_location(struct loc_track *t, struct kmem_cache *s,
                        (t->count - pos) * sizeof(struct location));
        t->count++;
        l->count = 1;
-       l->addr = addr;
+       l->addr = track->addr;
+       l->sum_time = age;
+       l->min_time = age;
+       l->max_time = age;
+       l->min_pid = track->pid;
+       l->max_pid = track->pid;
+       cpus_clear(l->cpus);
+       cpu_set(track->cpu, l->cpus);
+       nodes_clear(l->nodes);
+       node_set(page_to_nid(virt_to_page(track)), l->nodes);
        return 1;
 }
 
@@ -2850,11 +2841,8 @@ static void process_slab(struct loc_track *t, struct kmem_cache *s,
                set_bit(slab_index(p, s, addr), map);
 
        for_each_object(p, s, addr)
-               if (!test_bit(slab_index(p, s, addr), map)) {
-                       void *addr = get_track(s, p, alloc)->addr;
-
-                       add_location(t, s, addr);
-               }
+               if (!test_bit(slab_index(p, s, addr), map))
+                       add_location(t, s, get_track(s, p, alloc));
 }
 
 static int list_locations(struct kmem_cache *s, char *buf,
@@ -2888,15 +2876,47 @@ static int list_locations(struct kmem_cache *s, char *buf,
        }
 
        for (i = 0; i < t.count; i++) {
-               void *addr = t.loc[i].addr;
+               struct location *l = &t.loc[i];
 
                if (n > PAGE_SIZE - 100)
                        break;
-               n += sprintf(buf + n, "%7ld ", t.loc[i].count);
-               if (addr)
-                       n += sprint_symbol(buf + n, (unsigned long)t.loc[i].addr);
+               n += sprintf(buf + n, "%7ld ", l->count);
+
+               if (l->addr)
+                       n += sprint_symbol(buf + n, (unsigned long)l->addr);
                else
                        n += sprintf(buf + n, "<not-available>");
+
+               if (l->sum_time != l->min_time) {
+                       unsigned long remainder;
+
+                       n += sprintf(buf + n, " age=%ld/%ld/%ld",
+                       l->min_time,
+                       div_long_long_rem(l->sum_time, l->count, &remainder),
+                       l->max_time);
+               } else
+                       n += sprintf(buf + n, " age=%ld",
+                               l->min_time);
+
+               if (l->min_pid != l->max_pid)
+                       n += sprintf(buf + n, " pid=%ld-%ld",
+                               l->min_pid, l->max_pid);
+               else
+                       n += sprintf(buf + n, " pid=%ld",
+                               l->min_pid);
+
+               if (num_online_cpus() > 1 && !cpus_empty(l->cpus)) {
+                       n += sprintf(buf + n, " cpus=");
+                       n += cpulist_scnprintf(buf + n, PAGE_SIZE - n - 50,
+                                       l->cpus);
+               }
+
+               if (num_online_nodes() > 1 && !nodes_empty(l->nodes)) {
+                       n += sprintf(buf + n, " nodes=");
+                       n += nodelist_scnprintf(buf + n, PAGE_SIZE - n - 50,
+                                       l->nodes);
+               }
+
                n += sprintf(buf + n, "\n");
        }
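
Put together, the format strings above mean that each call site in the location report now carries age, pid, cpu and node annotations in addition to the count and symbol. A made-up line (symbol name, offsets and numbers are purely illustrative) emitted by list_locations() might look like:

   4296 __alloc_skb+0x4a/0x150 age=5/1230/4175 pid=1-1583 cpus=0-3 nodes=0-1
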