diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index cb64022..734da57 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -177,6 +177,9 @@ struct lock_class_stats lock_stats(struct lock_class *class)
 
                lock_time_add(&pcs->read_holdtime, &stats.read_holdtime);
                lock_time_add(&pcs->write_holdtime, &stats.write_holdtime);
+
+               for (i = 0; i < ARRAY_SIZE(stats.bounces); i++)
+                       stats.bounces[i] += pcs->bounces[i];
        }
 
        return stats;
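
A note on this hunk: the new loop mirrors what the lock_time_add() calls already do for the hold/wait times, folding every per-cpu counter set into the single aggregate that lock_stats() returns. A minimal userspace sketch of that folding pattern (the NR_CPUS/NR_BOUNCE_TYPES sizes and the sample counts are made up for illustration):

#include <stdio.h>

#define NR_CPUS 4
#define NR_BOUNCE_TYPES 4       /* stands in for ARRAY_SIZE(stats.bounces) */

struct cpu_stats {
        unsigned long bounces[NR_BOUNCE_TYPES];
};

int main(void)
{
        /* made-up per-CPU counters; the rest zero-initialize */
        struct cpu_stats pcs[NR_CPUS] = {
                { { 1, 0, 2, 0 } },
                { { 0, 3, 0, 1 } },
        };
        unsigned long total[NR_BOUNCE_TYPES] = { 0 };
        int cpu, i;

        /* same shape as the for_each_possible_cpu() walk in lock_stats() */
        for (cpu = 0; cpu < NR_CPUS; cpu++)
                for (i = 0; i < NR_BOUNCE_TYPES; i++)
                        total[i] += pcs[cpu].bounces[i];

        for (i = 0; i < NR_BOUNCE_TYPES; i++)
                printf("bounces[%d] = %lu\n", i, total[i]);
        return 0;
}
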
@@ -372,6 +375,11 @@ unsigned int max_recursion_depth;
  * about it later on, in lockdep_info().
  */
 static int lockdep_init_error;
+static unsigned long lockdep_init_trace_data[20];
+static struct stack_trace lockdep_init_trace = {
+       .max_entries = ARRAY_SIZE(lockdep_init_trace_data),
+       .entries = lockdep_init_trace_data,
+};
 
 /*
  * Various lockdep statistics:
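
lockdep_init_trace is statically sized on purpose: this error path can run before any allocator is up, so the entry buffer has to exist at compile time. A standalone sketch of the same preallocated-trace pattern (struct stack_trace and save_stack_trace here are local stand-ins that record dummy addresses, not the kernel's versions):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct stack_trace {
        unsigned int nr_entries, max_entries;
        unsigned long *entries;
};

static unsigned long trace_data[20];
static struct stack_trace trace = {
        .max_entries = ARRAY_SIZE(trace_data),
        .entries = trace_data,
};

/* fake capture: copy a few pretend return addresses */
static void save_stack_trace(struct stack_trace *t)
{
        unsigned long fake[] = { 0xc0100000, 0xc0100040 };
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(fake) && i < t->max_entries; i++)
                t->entries[i] = fake[i];
        t->nr_entries = i;
}

int main(void)
{
        unsigned int i;

        save_stack_trace(&trace);
        for (i = 0; i < trace.nr_entries; i++)
                printf("[<%08lx>]\n", trace.entries[i]);
        return 0;
}
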
@@ -659,6 +667,7 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
        if (unlikely(!lockdep_initialized)) {
                lockdep_init();
                lockdep_init_error = 1;
+               save_stack_trace(&lockdep_init_trace);
        }
 #endif
 
@@ -2325,6 +2334,9 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name,
        lock->name = name;
        lock->key = key;
        lock->class_cache = NULL;
+#ifdef CONFIG_LOCK_STAT
+       lock->cpu = raw_smp_processor_id();
+#endif
        if (subclass)
                register_lock_class(lock, subclass, 1);
 }
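
The new lock->cpu field records which CPU a lock was initialized on; later hunks count a bounce whenever a different CPU touches it. raw_smp_processor_id() is presumably used because lockdep_init_map() can run in preemptible context, where the checked smp_processor_id() would warn under CONFIG_DEBUG_PREEMPT, and an approximate CPU is good enough for a statistics baseline. A sketch of the config-guarded field (CONFIG_LOCK_STAT and raw_smp_processor_id are faked locally so this compiles on its own):

#include <stdio.h>

#define CONFIG_LOCK_STAT 1      /* pretend the config option is set */

struct map_sketch {
        const char *name;
#ifdef CONFIG_LOCK_STAT
        int cpu;                /* CPU this map was initialized on */
#endif
};

/* stand-in for the kernel helper; the raw_ variant skips the
 * "used in preemptible context" debug check */
static int raw_smp_processor_id(void)
{
        return 2;
}

int main(void)
{
        struct map_sketch lock = { .name = "demo_lock" };

#ifdef CONFIG_LOCK_STAT
        lock.cpu = raw_smp_processor_id();
        printf("%s seeded on cpu %d\n", lock.name, lock.cpu);
#endif
        return 0;
}
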
@@ -2775,6 +2787,8 @@ found_it:
        stats = get_lock_stats(hlock->class);
        if (point < ARRAY_SIZE(stats->contention_point))
                stats->contention_point[point]++;
+       if (lock->cpu != smp_processor_id())
+               stats->bounces[bounce_contended + !!hlock->read]++;
        put_lock_stats(stats);
 }
 
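The index arithmetic above relies on each read slot sitting directly after its write slot. The enum layout below is assumed from the matching include/linux/lockdep.h change in this series; note that hlock->read can be 2 (recursive read), which is why the double negation is needed to fold it to 0 or 1:

#include <stdio.h>

enum bounce_type {
        bounce_acquired_write,
        bounce_acquired_read,
        bounce_contended_write,
        bounce_contended_read,
        nr_bounce_types,

        bounce_acquired = bounce_acquired_write,
        bounce_contended = bounce_contended_write,
};

int main(void)
{
        int read;

        for (read = 0; read <= 2; read++)       /* hlock->read is 0, 1 or 2 */
                printf("read=%d -> contended slot %d, acquired slot %d\n",
                       read, bounce_contended + !!read,
                       bounce_acquired + !!read);
        return 0;
}
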
@@ -2786,8 +2800,8 @@ __lock_acquired(struct lockdep_map *lock)
        struct lock_class_stats *stats;
        unsigned int depth;
        u64 now;
-       s64 waittime;
-       int i;
+       s64 waittime = 0;
+       int i, cpu;
 
        depth = curr->lockdep_depth;
        if (DEBUG_LOCKS_WARN_ON(!depth))
@@ -2809,19 +2823,25 @@ __lock_acquired(struct lockdep_map *lock)
        return;
 
 found_it:
-       if (!hlock->waittime_stamp)
-               return;
-
-       now = sched_clock();
-       waittime = now - hlock->waittime_stamp;
-       hlock->holdtime_stamp = now;
+       cpu = smp_processor_id();
+       if (hlock->waittime_stamp) {
+               now = sched_clock();
+               waittime = now - hlock->waittime_stamp;
+               hlock->holdtime_stamp = now;
+       }
 
        stats = get_lock_stats(hlock->class);
-       if (hlock->read)
-               lock_time_inc(&stats->read_waittime, waittime);
-       else
-               lock_time_inc(&stats->write_waittime, waittime);
+       if (waittime) {
+               if (hlock->read)
+                       lock_time_inc(&stats->read_waittime, waittime);
+               else
+                       lock_time_inc(&stats->write_waittime, waittime);
+       }
+       if (lock->cpu != cpu)
+               stats->bounces[bounce_acquired + !!hlock->read]++;
        put_lock_stats(stats);
+
+       lock->cpu = cpu;
 }
 
 void lock_contended(struct lockdep_map *lock, unsigned long ip)
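
Two behavioural points in the reworked __lock_acquired(): wait time is only accounted when lock_contended() actually stamped the held lock, and a bounce is counted on every acquire from a CPU other than the lock's last owner (first seeded in lockdep_init_map() above), after which ownership moves to the acquiring CPU. A userspace sketch of just that ownership rule:

#include <stdio.h>

struct fake_lock {
        int cpu;                        /* last-owner CPU, as lock->cpu above */
        unsigned long bounces;
};

static void acquired_on(struct fake_lock *lock, int cpu)
{
        if (lock->cpu != cpu)           /* cacheline likely migrated */
                lock->bounces++;
        lock->cpu = cpu;                /* ownership moves */
}

int main(void)
{
        struct fake_lock lock = { .cpu = 0 };
        int seq[] = { 0, 1, 1, 0, 1 }, i;

        for (i = 0; i < 5; i++)
                acquired_on(&lock, seq[i]);

        printf("bounces: %lu\n", lock.bounces); /* 3: 0->1, 1->0, 0->1 */
        return 0;
}
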
@@ -3026,8 +3046,11 @@ void __init lockdep_info(void)
         printk(" per task-struct memory footprint: %lu bytes\n",
                sizeof(struct held_lock) * MAX_LOCK_DEPTH);
 
 #ifdef CONFIG_DEBUG_LOCKDEP
-       if (lockdep_init_error)
-               printk("WARNING: lockdep init error! Arch code didnt call lockdep_init() early enough?\n");
+       if (lockdep_init_error) {
+               printk("WARNING: lockdep init error! Arch code didn't call lockdep_init() early enough?\n");
+               printk("Call stack leading to lockdep invocation was:\n");
+               print_stack_trace(&lockdep_init_trace, 0);
+       }
 #endif
 }