Added proximity sensor IQS128 as input device
[linux] / lib / debugobjects.c
1 /*
2  * Generic infrastructure for lifetime debugging of objects.
3  *
4  * Started by Thomas Gleixner
5  *
6  * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
7  *
8  * For licencing details see kernel-base/COPYING
9  */
10
11 #define pr_fmt(fmt) "ODEBUG: " fmt
12
13 #include <linux/debugobjects.h>
14 #include <linux/interrupt.h>
15 #include <linux/sched.h>
16 #include <linux/sched/task_stack.h>
17 #include <linux/seq_file.h>
18 #include <linux/debugfs.h>
19 #include <linux/slab.h>
20 #include <linux/hash.h>
21 #include <linux/kmemleak.h>
22
#define ODEBUG_HASH_BITS	14
#define ODEBUG_HASH_SIZE	(1 << ODEBUG_HASH_BITS)

/* Initial (static) pool size and the watermark below which it is refilled */
#define ODEBUG_POOL_SIZE	1024
#define ODEBUG_POOL_MIN_LEVEL	256

/* Objects are hashed by the page-sized chunk their address falls into */
#define ODEBUG_CHUNK_SHIFT	PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE	(1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK	(~(ODEBUG_CHUNK_SIZE - 1))

/* One hash bucket: its object chain and the lock protecting it */
struct debug_bucket {
	struct hlist_head	list;
	raw_spinlock_t		lock;
};

static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];

/* Bootstrap pool used before the slab allocator is available */
static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

/* Protects obj_pool, obj_to_free and the pool counters below */
static DEFINE_RAW_SPINLOCK(pool_lock);

static HLIST_HEAD(obj_pool);
static HLIST_HEAD(obj_to_free);

/* Historic low watermark of the allocation pool */
static int			obj_pool_min_free = ODEBUG_POOL_SIZE;
static int			obj_pool_free = ODEBUG_POOL_SIZE;
static int			obj_pool_used;
static int			obj_pool_max_used;
/* The number of objs on the global free list */
static int			obj_nr_tofree;
static struct kmem_cache	*obj_cache;

/* Statistics exported via debugfs */
static int			debug_objects_maxchain __read_mostly;
static int __maybe_unused	debug_objects_maxchecked __read_mostly;
static int			debug_objects_fixups __read_mostly;
static int			debug_objects_warnings __read_mostly;
static int			debug_objects_enabled __read_mostly
				= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
static int			debug_objects_pool_size __read_mostly
				= ODEBUG_POOL_SIZE;
static int			debug_objects_pool_min_level __read_mostly
				= ODEBUG_POOL_MIN_LEVEL;
/* Descriptor used by the self test; its warnings are suppressed */
static struct debug_obj_descr	*descr_test  __read_mostly;

/*
 * Track numbers of kmem_cache_alloc()/free() calls done.
 */
static int			debug_objects_allocated;
static int			debug_objects_freed;

static void free_obj_work(struct work_struct *work);
static DECLARE_WORK(debug_obj_work, free_obj_work);
75
/* Handler for the "debug_objects" boot parameter: turn tracking on. */
static int __init enable_object_debug(char *str)
{
	debug_objects_enabled = 1;
	return 0;
}

/* Handler for the "no_debug_objects" boot parameter: turn tracking off. */
static int __init disable_object_debug(char *str)
{
	debug_objects_enabled = 0;
	return 0;
}

early_param("debug_objects", enable_object_debug);
early_param("no_debug_objects", disable_object_debug);
90
/* Human readable names for the ODEBUG_STATE_* enum, used in warnings. */
static const char *obj_states[ODEBUG_STATE_MAX] = {
	[ODEBUG_STATE_NONE]		= "none",
	[ODEBUG_STATE_INIT]		= "initialized",
	[ODEBUG_STATE_INACTIVE]		= "inactive",
	[ODEBUG_STATE_ACTIVE]		= "active",
	[ODEBUG_STATE_DESTROYED]	= "destroyed",
	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
};
99
/*
 * Refill the allocation pool up to debug_objects_pool_min_level, first by
 * recycling objects parked on the global free list and then, once the slab
 * cache exists, by allocating fresh ones.  GFP_ATOMIC is used because this
 * can run in any context, including with interrupts disabled.
 */
static void fill_pool(void)
{
	gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
	struct debug_obj *new, *obj;
	unsigned long flags;

	/* Fast path: pool is already at or above the refill watermark. */
	if (likely(obj_pool_free >= debug_objects_pool_min_level))
		return;

	/*
	 * Reuse objs from the global free list; they will be reinitialized
	 * when allocating.
	 */
	while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) {
		raw_spin_lock_irqsave(&pool_lock, flags);
		/*
		 * Recheck with the lock held as the worker thread might have
		 * won the race and freed the global free list already.
		 */
		if (obj_nr_tofree) {
			obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
			hlist_del(&obj->node);
			obj_nr_tofree--;
			hlist_add_head(&obj->node, &obj_pool);
			obj_pool_free++;
		}
		raw_spin_unlock_irqrestore(&pool_lock, flags);
	}

	/* Before the slab cache is set up (early boot) we cannot allocate. */
	if (unlikely(!obj_cache))
		return;

	while (obj_pool_free < debug_objects_pool_min_level) {

		/* Allocate outside pool_lock; only list insertion needs it. */
		new = kmem_cache_zalloc(obj_cache, gfp);
		if (!new)
			return;

		raw_spin_lock_irqsave(&pool_lock, flags);
		hlist_add_head(&new->node, &obj_pool);
		debug_objects_allocated++;
		obj_pool_free++;
		raw_spin_unlock_irqrestore(&pool_lock, flags);
	}
}
145
146 /*
147  * Lookup an object in the hash bucket.
148  */
149 static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
150 {
151         struct debug_obj *obj;
152         int cnt = 0;
153
154         hlist_for_each_entry(obj, &b->list, node) {
155                 cnt++;
156                 if (obj->object == addr)
157                         return obj;
158         }
159         if (cnt > debug_objects_maxchain)
160                 debug_objects_maxchain = cnt;
161
162         return NULL;
163 }
164
165 /*
166  * Allocate a new object. If the pool is empty, switch off the debugger.
167  * Must be called with interrupts disabled.
168  */
static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
{
	struct debug_obj *obj = NULL;

	raw_spin_lock(&pool_lock);
	if (obj_pool.first) {
		/* Take the first free object and attach it to bucket @b. */
		obj	    = hlist_entry(obj_pool.first, typeof(*obj), node);

		obj->object = addr;
		obj->descr  = descr;
		obj->state  = ODEBUG_STATE_NONE;
		obj->astate = 0;
		hlist_del(&obj->node);

		hlist_add_head(&obj->node, &b->list);

		/* Update usage counters and high/low watermarks. */
		obj_pool_used++;
		if (obj_pool_used > obj_pool_max_used)
			obj_pool_max_used = obj_pool_used;

		obj_pool_free--;
		if (obj_pool_free < obj_pool_min_free)
			obj_pool_min_free = obj_pool_free;
	}
	raw_spin_unlock(&pool_lock);

	/* NULL means the pool was empty; caller disables the debugger. */
	return obj;
}
198
199 /*
200  * workqueue function to free objects.
201  *
202  * To reduce contention on the global pool_lock, the actual freeing of
203  * debug objects will be delayed if the pool_lock is busy.
204  */
static void free_obj_work(struct work_struct *work)
{
	struct hlist_node *tmp;
	struct debug_obj *obj;
	unsigned long flags;
	HLIST_HEAD(tofree);

	/* Lock contended: give up; we will run again on the next schedule. */
	if (!raw_spin_trylock_irqsave(&pool_lock, flags))
		return;

	/*
	 * Objects may have been taken from the pool since the work was
	 * queued, so recheck whether the pool is full; if not, top it up
	 * from the global free list first.
	 */
	while (obj_nr_tofree && obj_pool_free < debug_objects_pool_size) {
		obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
		hlist_del(&obj->node);
		hlist_add_head(&obj->node, &obj_pool);
		obj_pool_free++;
		obj_nr_tofree--;
	}

	/*
	 * Pool list is already full and there are still objs on the free
	 * list. Move remaining free objs to a temporary list to free the
	 * memory outside the pool_lock held region.
	 */
	if (obj_nr_tofree) {
		hlist_move_list(&obj_to_free, &tofree);
		debug_objects_freed += obj_nr_tofree;
		obj_nr_tofree = 0;
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);

	/* The actual kmem_cache_free() calls happen without pool_lock. */
	hlist_for_each_entry_safe(obj, tmp, &tofree, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}
}
245
246 static bool __free_object(struct debug_obj *obj)
247 {
248         unsigned long flags;
249         bool work;
250
251         raw_spin_lock_irqsave(&pool_lock, flags);
252         work = (obj_pool_free > debug_objects_pool_size) && obj_cache;
253         obj_pool_used--;
254
255         if (work) {
256                 obj_nr_tofree++;
257                 hlist_add_head(&obj->node, &obj_to_free);
258         } else {
259                 obj_pool_free++;
260                 hlist_add_head(&obj->node, &obj_pool);
261         }
262         raw_spin_unlock_irqrestore(&pool_lock, flags);
263         return work;
264 }
265
266 /*
267  * Put the object back into the pool and schedule work to free objects
268  * if necessary.
269  */
270 static void free_object(struct debug_obj *obj)
271 {
272         if (__free_object(obj))
273                 schedule_work(&debug_obj_work);
274 }
275
276 /*
277  * We run out of memory. That means we probably have tons of objects
278  * allocated.
279  */
static void debug_objects_oom(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj *obj;
	unsigned long flags;
	int i;

	pr_warn("Out of memory. ODEBUG disabled\n");

	/* Walk every bucket and give all tracked objects back to the pool. */
	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		/* Detach the whole chain under the lock, free it outside. */
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_move_list(&db->list, &freelist);
		raw_spin_unlock_irqrestore(&db->lock, flags);

		/* Now free them */
		hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}
	}
}
303
304 /*
305  * We use the pfn of the address for the hash. That way we can check
306  * for freed objects simply by checking the affected bucket.
307  */
308 static struct debug_bucket *get_bucket(unsigned long addr)
309 {
310         unsigned long hash;
311
312         hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
313         return &obj_hash[hash];
314 }
315
/*
 * Warn about @obj being subjected to operation @msg in an invalid state.
 * Output is limited to the first five reports; objects belonging to the
 * self test (descr == descr_test) are never reported.
 */
static void debug_print_object(struct debug_obj *obj, char *msg)
{
	struct debug_obj_descr *descr = obj->descr;
	static int limit;

	if (limit < 5 && descr != descr_test) {
		/* Let the type specific code provide an instance hint. */
		void *hint = descr->debug_hint ?
			descr->debug_hint(obj->object) : NULL;
		limit++;
		WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
				 "object type: %s hint: %pS\n",
			msg, obj_states[obj->state], obj->astate,
			descr->name, hint);
	}
	debug_objects_warnings++;
}
332
333 /*
334  * Try to repair the damage, so we have a better chance to get useful
335  * debug output.
336  */
337 static bool
338 debug_object_fixup(bool (*fixup)(void *addr, enum debug_obj_state state),
339                    void * addr, enum debug_obj_state state)
340 {
341         if (fixup && fixup(addr, state)) {
342                 debug_objects_fixups++;
343                 return true;
344         }
345         return false;
346 }
347
/*
 * Warn (at most five times) when an object's init annotation disagrees
 * with its actual location: on-stack objects must use
 * debug_object_init_on_stack(), off-stack ones debug_object_init().
 */
static void debug_object_is_on_stack(void *addr, int onstack)
{
	int is_on_stack;
	static int limit;

	if (limit > 4)
		return;

	is_on_stack = object_is_on_stack(addr);
	/* Annotation matches reality: nothing to report. */
	if (is_on_stack == onstack)
		return;

	limit++;
	if (is_on_stack)
		pr_warn("object %p is on stack %p, but NOT annotated.\n", addr,
			 task_stack_page(current));
	else
		pr_warn("object %p is NOT on stack %p, but annotated.\n", addr,
			 task_stack_page(current));

	WARN_ON(1);
}
370
/*
 * Core of debug_object_init()/debug_object_init_on_stack(): look up or
 * create the tracking entry for @addr and transition it to INIT.
 * @onstack states whether the caller claims the object lives on a stack;
 * this is verified against the actual address on first allocation.
 */
static void
__debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	bool check_object_on_stack = false;

	/* Top up the object pool before taking the bucket lock. */
	fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		obj = alloc_object(addr, db, descr);
		if (!obj) {
			/* Pool exhausted: switch off tracking entirely. */
			debug_objects_enabled = 0;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			debug_objects_oom();
			return;
		}
		/* Stack check is deferred until the lock is dropped. */
		check_object_on_stack = true;
	}

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_INIT;
		break;

	case ODEBUG_STATE_ACTIVE:
		/* Init of an active object: report and try to fix up. */
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_print_object(obj, "init");
		debug_object_fixup(descr->fixup_init, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_print_object(obj, "init");
		return;
	default:
		break;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	/* Warn when the annotation does not match the actual location. */
	if (check_object_on_stack)
		debug_object_is_on_stack(addr, onstack);

}
425
426 /**
427  * debug_object_init - debug checks when an object is initialized
428  * @addr:       address of the object
429  * @descr:      pointer to an object specific debug description structure
430  */
431 void debug_object_init(void *addr, struct debug_obj_descr *descr)
432 {
433         if (!debug_objects_enabled)
434                 return;
435
436         __debug_object_init(addr, descr, 0);
437 }
438 EXPORT_SYMBOL_GPL(debug_object_init);
439
440 /**
441  * debug_object_init_on_stack - debug checks when an object on stack is
442  *                              initialized
443  * @addr:       address of the object
444  * @descr:      pointer to an object specific debug description structure
445  */
446 void debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr)
447 {
448         if (!debug_objects_enabled)
449                 return;
450
451         __debug_object_init(addr, descr, 1);
452 }
453 EXPORT_SYMBOL_GPL(debug_object_init_on_stack);
454
455 /**
456  * debug_object_activate - debug checks when an object is activated
457  * @addr:       address of the object
458  * @descr:      pointer to an object specific debug description structure
459  * Returns 0 for success, -EINVAL for check failed.
460  */
int debug_object_activate(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int ret;
	/* Stand-in entry for reporting when @addr is not tracked. */
	struct debug_obj o = { .object = addr,
			       .state = ODEBUG_STATE_NOTAVAILABLE,
			       .descr = descr };

	if (!debug_objects_enabled)
		return 0;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		ret = 0;
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
			obj->state = ODEBUG_STATE_ACTIVE;
			break;

		case ODEBUG_STATE_ACTIVE:
			/* Double activation: report and try to fix up. */
			state = obj->state;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			debug_print_object(obj, "activate");
			ret = debug_object_fixup(descr->fixup_activate, addr, state);
			return ret ? 0 : -EINVAL;

		case ODEBUG_STATE_DESTROYED:
			ret = -EINVAL;
			break;
		default:
			break;
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);
		if (ret)
			debug_print_object(obj, "activate");
		return ret;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);

	/*
	 * We are here when a static object is activated. We
	 * let the type specific code confirm whether this is
	 * true or not. if true, we just make sure that the
	 * static object is tracked in the object tracker. If
	 * not, this must be a bug, so we try to fix it up.
	 */
	if (descr->is_static_object && descr->is_static_object(addr)) {
		/* track this static object */
		debug_object_init(addr, descr);
		debug_object_activate(addr, descr);
	} else {
		debug_print_object(&o, "activate");
		ret = debug_object_fixup(descr->fixup_activate, addr,
					ODEBUG_STATE_NOTAVAILABLE);
		return ret ? 0 : -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(debug_object_activate);
529
530 /**
531  * debug_object_deactivate - debug checks when an object is deactivated
532  * @addr:       address of the object
533  * @descr:      pointer to an object specific debug description structure
534  */
void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
		case ODEBUG_STATE_ACTIVE:
			/* A pending usage state (astate) forbids deactivation. */
			if (!obj->astate)
				obj->state = ODEBUG_STATE_INACTIVE;
			else
				goto out_unlock_print;
			break;

		case ODEBUG_STATE_DESTROYED:
			goto out_unlock_print;

		default:
			break;
		}
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (!obj) {
		/* Untracked object: report with a stand-in entry. */
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "deactivate");
	}
	return;

out_unlock_print:
	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(obj, "deactivate");
}
EXPORT_SYMBOL_GPL(debug_object_deactivate);
583
584 /**
585  * debug_object_destroy - debug checks when an object is destroyed
586  * @addr:       address of the object
587  * @descr:      pointer to an object specific debug description structure
588  */
void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_DESTROYED;
		break;
	case ODEBUG_STATE_ACTIVE:
		/* Destroying an active object: report and try to fix up. */
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_print_object(obj, "destroy");
		debug_object_fixup(descr->fixup_destroy, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		/* Double destroy is invalid: report it. */
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_print_object(obj, "destroy");
		return;
	default:
		break;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_destroy);
631
632 /**
633  * debug_object_free - debug checks when an object is freed
634  * @addr:       address of the object
635  * @descr:      pointer to an object specific debug description structure
636  */
void debug_object_free(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
		/* Freeing an active object: report and try to fix up. */
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_print_object(obj, "free");
		debug_object_fixup(descr->fixup_free, addr, state);
		return;
	default:
		/* Any other state: stop tracking and recycle the entry. */
		hlist_del(&obj->node);
		raw_spin_unlock_irqrestore(&db->lock, flags);
		free_object(obj);
		return;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_free);
672
673 /**
674  * debug_object_assert_init - debug checks when object should be init-ed
675  * @addr:       address of the object
676  * @descr:      pointer to an object specific debug description structure
677  */
void debug_object_assert_init(void *addr, struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		/* Stand-in entry used for the report below. */
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		raw_spin_unlock_irqrestore(&db->lock, flags);
		/*
		 * Maybe the object is static, and we let the type specific
		 * code confirm. Track this static object if true, else invoke
		 * fixup.
		 */
		if (descr->is_static_object && descr->is_static_object(addr)) {
			/* Track this static object */
			debug_object_init(addr, descr);
		} else {
			debug_print_object(&o, "assert_init");
			debug_object_fixup(descr->fixup_assert_init, addr,
					   ODEBUG_STATE_NOTAVAILABLE);
		}
		return;
	}

	/* Object is tracked: nothing to do. */
	raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_assert_init);
717
718 /**
719  * debug_object_active_state - debug checks object usage state machine
720  * @addr:       address of the object
721  * @descr:      pointer to an object specific debug description structure
722  * @expect:     expected state
723  * @next:       state to move to if expected state is found
724  */
void
debug_object_active_state(void *addr, struct debug_obj_descr *descr,
			  unsigned int expect, unsigned int next)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
			/* Advance the usage state only from the expected one. */
			if (obj->astate == expect)
				obj->astate = next;
			else
				goto out_unlock_print;
			break;

		default:
			goto out_unlock_print;
		}
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (!obj) {
		/* Untracked object: report with a stand-in entry. */
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "active_state");
	}
	return;

out_unlock_print:
	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(obj, "active_state");
}
EXPORT_SYMBOL_GPL(debug_object_active_state);
770
771 #ifdef CONFIG_DEBUG_OBJECTS_FREE
/*
 * Scan the memory range [@address, @address + @size) for tracked objects.
 * Active objects are reported and run through their fixup_free() handler;
 * any other tracked object in the range is silently removed from tracking.
 */
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
	struct debug_obj_descr *descr;
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct hlist_node *tmp;
	struct debug_obj *obj;
	int cnt, objs_checked = 0;
	bool work = false;

	saddr = (unsigned long) address;
	eaddr = saddr + size;
	paddr = saddr & ODEBUG_CHUNK_MASK;
	/* Number of hash chunks covered by the range, rounded up. */
	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
	chunks >>= ODEBUG_CHUNK_SHIFT;

	for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
		db = get_bucket(paddr);

repeat:
		cnt = 0;
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
			cnt++;
			oaddr = (unsigned long) obj->object;
			/* Skip objects outside the freed range. */
			if (oaddr < saddr || oaddr >= eaddr)
				continue;

			switch (obj->state) {
			case ODEBUG_STATE_ACTIVE:
				/*
				 * The fixup runs without the bucket lock, so
				 * the chain must be rescanned afterwards.
				 */
				descr = obj->descr;
				state = obj->state;
				raw_spin_unlock_irqrestore(&db->lock, flags);
				debug_print_object(obj, "free");
				debug_object_fixup(descr->fixup_free,
						   (void *) oaddr, state);
				goto repeat;
			default:
				hlist_del(&obj->node);
				work |= __free_object(obj);
				break;
			}
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);

		if (cnt > debug_objects_maxchain)
			debug_objects_maxchain = cnt;

		objs_checked += cnt;
	}

	if (objs_checked > debug_objects_maxchecked)
		debug_objects_maxchecked = objs_checked;

	/* Schedule work to actually kmem_cache_free() objects */
	if (work)
		schedule_work(&debug_obj_work);
}
831
832 void debug_check_no_obj_freed(const void *address, unsigned long size)
833 {
834         if (debug_objects_enabled)
835                 __debug_check_no_obj_freed(address, size);
836 }
837 #endif
838
839 #ifdef CONFIG_DEBUG_FS
840
841 static int debug_stats_show(struct seq_file *m, void *v)
842 {
843         seq_printf(m, "max_chain     :%d\n", debug_objects_maxchain);
844         seq_printf(m, "max_checked   :%d\n", debug_objects_maxchecked);
845         seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
846         seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
847         seq_printf(m, "pool_free     :%d\n", obj_pool_free);
848         seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
849         seq_printf(m, "pool_used     :%d\n", obj_pool_used);
850         seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
851         seq_printf(m, "on_free_list  :%d\n", obj_nr_tofree);
852         seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
853         seq_printf(m, "objs_freed    :%d\n", debug_objects_freed);
854         return 0;
855 }
856
/* debugfs open: route reads through debug_stats_show() via seq_file. */
static int debug_stats_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, debug_stats_show, NULL);
}
861
/* File operations for <debugfs>/debug_objects/stats */
static const struct file_operations debug_stats_fops = {
	.open		= debug_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
868
869 static int __init debug_objects_init_debugfs(void)
870 {
871         struct dentry *dbgdir, *dbgstats;
872
873         if (!debug_objects_enabled)
874                 return 0;
875
876         dbgdir = debugfs_create_dir("debug_objects", NULL);
877         if (!dbgdir)
878                 return -ENOMEM;
879
880         dbgstats = debugfs_create_file("stats", 0444, dbgdir, NULL,
881                                        &debug_stats_fops);
882         if (!dbgstats)
883                 goto err;
884
885         return 0;
886
887 err:
888         debugfs_remove(dbgdir);
889
890         return -ENOMEM;
891 }
892 __initcall(debug_objects_init_debugfs);
893
894 #else
895 static inline void debug_objects_init_debugfs(void) { }
896 #endif
897
898 #ifdef CONFIG_DEBUG_OBJECTS_SELFTEST
899
900 /* Random data structure for the self test */
struct self_test {
	unsigned long	dummy1[6];	/* padding to give the object some size */
	int		static_init;	/* flag read back by is_static_object() */
	unsigned long	dummy2[3];	/* trailing padding */
};
906
907 static __initdata struct debug_obj_descr descr_type_test;
908
909 static bool __init is_static_object(void *addr)
910 {
911         struct self_test *obj = addr;
912
913         return obj->static_init;
914 }
915
916 /*
917  * fixup_init is called when:
918  * - an active object is initialized
919  */
920 static bool __init fixup_init(void *addr, enum debug_obj_state state)
921 {
922         struct self_test *obj = addr;
923
924         switch (state) {
925         case ODEBUG_STATE_ACTIVE:
926                 debug_object_deactivate(obj, &descr_type_test);
927                 debug_object_init(obj, &descr_type_test);
928                 return true;
929         default:
930                 return false;
931         }
932 }
933
934 /*
935  * fixup_activate is called when:
936  * - an active object is activated
937  * - an unknown non-static object is activated
938  */
939 static bool __init fixup_activate(void *addr, enum debug_obj_state state)
940 {
941         struct self_test *obj = addr;
942
943         switch (state) {
944         case ODEBUG_STATE_NOTAVAILABLE:
945                 return true;
946         case ODEBUG_STATE_ACTIVE:
947                 debug_object_deactivate(obj, &descr_type_test);
948                 debug_object_activate(obj, &descr_type_test);
949                 return true;
950
951         default:
952                 return false;
953         }
954 }
955
956 /*
957  * fixup_destroy is called when:
958  * - an active object is destroyed
959  */
960 static bool __init fixup_destroy(void *addr, enum debug_obj_state state)
961 {
962         struct self_test *obj = addr;
963
964         switch (state) {
965         case ODEBUG_STATE_ACTIVE:
966                 debug_object_deactivate(obj, &descr_type_test);
967                 debug_object_destroy(obj, &descr_type_test);
968                 return true;
969         default:
970                 return false;
971         }
972 }
973
974 /*
975  * fixup_free is called when:
976  * - an active object is freed
977  */
978 static bool __init fixup_free(void *addr, enum debug_obj_state state)
979 {
980         struct self_test *obj = addr;
981
982         switch (state) {
983         case ODEBUG_STATE_ACTIVE:
984                 debug_object_deactivate(obj, &descr_type_test);
985                 debug_object_free(obj, &descr_type_test);
986                 return true;
987         default:
988                 return false;
989         }
990 }
991
992 static int __init
993 check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
994 {
995         struct debug_bucket *db;
996         struct debug_obj *obj;
997         unsigned long flags;
998         int res = -EINVAL;
999         enum debug_obj_state obj_state;
1000
1001         db = get_bucket((unsigned long) addr);
1002
1003         raw_spin_lock_irqsave(&db->lock, flags);
1004
1005         obj = lookup_object(addr, db);
1006         obj_state = obj ? obj->state : state;
1007
1008         raw_spin_unlock_irqrestore(&db->lock, flags);
1009
1010         if (!obj && state != ODEBUG_STATE_NONE) {
1011                 WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
1012                 goto out;
1013         }
1014         if (obj_state != state) {
1015                 WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
1016                        obj_state, state);
1017                 goto out;
1018         }
1019         if (fixups != debug_objects_fixups) {
1020                 WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
1021                        fixups, debug_objects_fixups);
1022                 goto out;
1023         }
1024         if (warnings != debug_objects_warnings) {
1025                 WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
1026                        warnings, debug_objects_warnings);
1027                 goto out;
1028         }
1029         res = 0;
1030 out:
1031         if (res)
1032                 debug_objects_enabled = 0;
1033         return res;
1034 }
1035
/*
 * Object type descriptor for the selftest; __initdata because it is
 * only referenced while descr_test points at it during boot.
 */
static __initdata struct debug_obj_descr descr_type_test = {
	.name			= "selftest",
	.is_static_object	= is_static_object,
	.fixup_init		= fixup_init,
	.fixup_activate		= fixup_activate,
	.fixup_destroy		= fixup_destroy,
	.fixup_free		= fixup_free,
};
1044
1045 static __initdata struct self_test obj = { .static_init = 0 };
1046
/*
 * Boot-time selftest: drive one object through every legal and illegal
 * state transition and verify (via check_results()) that the tracker
 * lands in the expected state with the expected fixup/warning counts.
 * The fixup/warning counters are saved and restored around the test;
 * the whole sequence runs with interrupts off so nothing else touches
 * the counters concurrently.
 */
static void __init debug_objects_selftest(void)
{
	int fixups, oldfixups, warnings, oldwarnings;
	unsigned long flags;

	local_irq_save(flags);

	fixups = oldfixups = debug_objects_fixups;
	warnings = oldwarnings = debug_objects_warnings;
	/* Route fixup callbacks to our test descriptor */
	descr_test = &descr_type_test;

	/* Normal lifecycle: init -> activate */
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	/* Double activate must trigger one fixup and one warning */
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
		goto out;
	debug_object_destroy(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
		goto out;
	/* Operations on a destroyed object: warn, but stay DESTROYED */
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

	/* Static object path: activating an untracked static object is OK */
	obj.static_init = 1;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	/* Init of an active object: fixup deactivates and re-inits it */
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
	/* Freeing memory holding an active object: fixup frees the tracker */
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	__debug_check_no_obj_freed(&obj, sizeof(obj));
	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
		goto out;
#endif
	pr_info("selftest passed\n");

out:
	/* Restore counters and detach the test descriptor */
	debug_objects_fixups = oldfixups;
	debug_objects_warnings = oldwarnings;
	descr_test = NULL;

	local_irq_restore(flags);
}
1117 #else
1118 static inline void debug_objects_selftest(void) { }
1119 #endif
1120
/*
 * Called during early boot to initialize the hash buckets and link
 * the static object pool objects into the pool list. After this call
 * the object tracker is fully operational.
 */
1126 void __init debug_objects_early_init(void)
1127 {
1128         int i;
1129
1130         for (i = 0; i < ODEBUG_HASH_SIZE; i++)
1131                 raw_spin_lock_init(&obj_hash[i].lock);
1132
1133         for (i = 0; i < ODEBUG_POOL_SIZE; i++)
1134                 hlist_add_head(&obj_static_pool[i].node, &obj_pool);
1135 }
1136
1137 /*
1138  * Convert the statically allocated objects to dynamic ones:
1139  */
static int __init debug_objects_replace_static_objects(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *tmp;
	struct debug_obj *obj, *new;
	HLIST_HEAD(objects);
	int i, cnt = 0;

	/*
	 * Allocate one dynamic replacement per static pool slot up front,
	 * so the swap below cannot fail halfway through.
	 */
	for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
		obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
		if (!obj)
			goto free;
		hlist_add_head(&obj->node, &objects);
	}

	/*
	 * debug_objects_mem_init() is called early enough that only one
	 * CPU is up and interrupts have been disabled, so it is safe to
	 * replace the active object references without locking.
	 */

	/* Remove the statically allocated objects from the pool */
	hlist_for_each_entry_safe(obj, tmp, &obj_pool, node)
		hlist_del(&obj->node);
	/* Move the allocated objects to the pool */
	hlist_move_list(&objects, &obj_pool);

	/* Replace the active object references */
	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		/* Detach the bucket's chain, then rebuild it object by object */
		hlist_move_list(&db->list, &objects);

		hlist_for_each_entry(obj, &objects, node) {
			/* Take a dynamic object from the pool ... */
			new = hlist_entry(obj_pool.first, typeof(*obj), node);
			hlist_del(&new->node);
			/* copy object data */
			*new = *obj;
			/* ... and hash it in place of the static one */
			hlist_add_head(&new->node, &db->list);
			cnt++;
		}
	}

	pr_debug("%d of %d active objects replaced\n",
		 cnt, obj_pool_used);
	return 0;
free:
	/* Allocation failed: release the partially built replacement list */
	hlist_for_each_entry_safe(obj, tmp, &objects, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}
	return -ENOMEM;
}
1191
/*
 * Called after the kmem_caches are functional to set up a dedicated
 * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
 * prevents the debug code from being called on kmem_cache_free() for
 * the debug tracker objects, avoiding recursive calls.
 */
1198 void __init debug_objects_mem_init(void)
1199 {
1200         if (!debug_objects_enabled)
1201                 return;
1202
1203         obj_cache = kmem_cache_create("debug_objects_cache",
1204                                       sizeof (struct debug_obj), 0,
1205                                       SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE,
1206                                       NULL);
1207
1208         if (!obj_cache || debug_objects_replace_static_objects()) {
1209                 debug_objects_enabled = 0;
1210                 kmem_cache_destroy(obj_cache);
1211                 pr_warn("out of memory.\n");
1212         } else
1213                 debug_objects_selftest();
1214
1215         /*
1216          * Increase the thresholds for allocating and freeing objects
1217          * according to the number of possible CPUs available in the system.
1218          */
1219         debug_objects_pool_size += num_possible_cpus() * 32;
1220         debug_objects_pool_min_level += num_possible_cpus() * 4;
1221 }