/*
 * Generic infrastructure for lifetime debugging of objects.
 *
 * Started by Thomas Gleixner
 *
 * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
 *
 * For licensing details see kernel-base/COPYING
 */

#define pr_fmt(fmt) "ODEBUG: " fmt

#include <linux/debugobjects.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/kmemleak.h>

#define ODEBUG_HASH_BITS	14
#define ODEBUG_HASH_SIZE	(1 << ODEBUG_HASH_BITS)

#define ODEBUG_POOL_SIZE	1024
#define ODEBUG_POOL_MIN_LEVEL	256

#define ODEBUG_CHUNK_SHIFT	PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE	(1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK	(~(ODEBUG_CHUNK_SIZE - 1))
	struct hlist_head list;

static struct debug_bucket obj_hash[ODEBUG_HASH_SIZE];

static struct debug_obj obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

static DEFINE_RAW_SPINLOCK(pool_lock);

static HLIST_HEAD(obj_pool);
static HLIST_HEAD(obj_to_free);

static int obj_pool_min_free = ODEBUG_POOL_SIZE;
static int obj_pool_free = ODEBUG_POOL_SIZE;
static int obj_pool_used;
static int obj_pool_max_used;
/* The number of objs on the global free list */
static int obj_nr_tofree;
static struct kmem_cache *obj_cache;
static int debug_objects_maxchain __read_mostly;
static int __maybe_unused debug_objects_maxchecked __read_mostly;
static int debug_objects_fixups __read_mostly;
static int debug_objects_warnings __read_mostly;
static int debug_objects_enabled __read_mostly
	= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
static int debug_objects_pool_size __read_mostly
static int debug_objects_pool_min_level __read_mostly
	= ODEBUG_POOL_MIN_LEVEL;
static struct debug_obj_descr *descr_test __read_mostly;

/*
 * Track the number of kmem_cache_alloc()/free() calls done.
 */
static int debug_objects_allocated;
static int debug_objects_freed;

static void free_obj_work(struct work_struct *work);
static DECLARE_WORK(debug_obj_work, free_obj_work);
static int __init enable_object_debug(char *str)
	debug_objects_enabled = 1;

static int __init disable_object_debug(char *str)
	debug_objects_enabled = 0;

early_param("debug_objects", enable_object_debug);
early_param("no_debug_objects", disable_object_debug);

static const char *obj_states[ODEBUG_STATE_MAX] = {
	[ODEBUG_STATE_NONE] = "none",
	[ODEBUG_STATE_INIT] = "initialized",
	[ODEBUG_STATE_INACTIVE] = "inactive",
	[ODEBUG_STATE_ACTIVE] = "active",
	[ODEBUG_STATE_DESTROYED] = "destroyed",
	[ODEBUG_STATE_NOTAVAILABLE] = "not available",
static void fill_pool(void)
	gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
	struct debug_obj *new, *obj;

	if (likely(obj_pool_free >= debug_objects_pool_min_level))

	/*
	 * Reuse objs from the global free list; they will be reinitialized
	 * when allocating.
	 */
	while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) {
		raw_spin_lock_irqsave(&pool_lock, flags);
		/*
		 * Recheck with the lock held as the worker thread might have
		 * won the race and freed the global free list already.
		 */
		obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
		hlist_del(&obj->node);
		hlist_add_head(&obj->node, &obj_pool);
		raw_spin_unlock_irqrestore(&pool_lock, flags);

	if (unlikely(!obj_cache))

	while (obj_pool_free < debug_objects_pool_min_level) {
		new = kmem_cache_zalloc(obj_cache, gfp);
		raw_spin_lock_irqsave(&pool_lock, flags);
		hlist_add_head(&new->node, &obj_pool);
		debug_objects_allocated++;
		raw_spin_unlock_irqrestore(&pool_lock, flags);
/*
 * Look up an object in the hash bucket.
 */
static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
	struct debug_obj *obj;

	hlist_for_each_entry(obj, &b->list, node) {
		if (obj->object == addr)

	if (cnt > debug_objects_maxchain)
		debug_objects_maxchain = cnt;

/*
 * Allocate a new object. If the pool is empty, switch off the debugger.
 * Must be called with interrupts disabled.
 */
static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
	struct debug_obj *obj = NULL;

	raw_spin_lock(&pool_lock);
	if (obj_pool.first) {
		obj = hlist_entry(obj_pool.first, typeof(*obj), node);
		obj->state = ODEBUG_STATE_NONE;
		hlist_del(&obj->node);
		hlist_add_head(&obj->node, &b->list);
		if (obj_pool_used > obj_pool_max_used)
			obj_pool_max_used = obj_pool_used;
		if (obj_pool_free < obj_pool_min_free)
			obj_pool_min_free = obj_pool_free;
	raw_spin_unlock(&pool_lock);
/*
 * Workqueue function to free objects.
 *
 * To reduce contention on the global pool_lock, the actual freeing of
 * debug objects will be delayed if the pool_lock is busy.
 */
static void free_obj_work(struct work_struct *work)
	struct hlist_node *tmp;
	struct debug_obj *obj;

	if (!raw_spin_trylock_irqsave(&pool_lock, flags))

	/*
	 * The objs on the pool list might be allocated before the work is
	 * run, so recheck whether the pool list is full or not; if not,
	 * refill the pool list from the global free list.
	 */
	while (obj_nr_tofree && obj_pool_free < debug_objects_pool_size) {
		obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
		hlist_del(&obj->node);
		hlist_add_head(&obj->node, &obj_pool);

	/*
	 * Pool list is already full and there are still objs on the free
	 * list. Move remaining free objs to a temporary list to free the
	 * memory outside the pool_lock held region.
	 */
	hlist_move_list(&obj_to_free, &tofree);
	debug_objects_freed += obj_nr_tofree;

	raw_spin_unlock_irqrestore(&pool_lock, flags);

	hlist_for_each_entry_safe(obj, tmp, &tofree, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
static bool __free_object(struct debug_obj *obj)
	raw_spin_lock_irqsave(&pool_lock, flags);
	work = (obj_pool_free > debug_objects_pool_size) && obj_cache;
		hlist_add_head(&obj->node, &obj_to_free);
		hlist_add_head(&obj->node, &obj_pool);
	raw_spin_unlock_irqrestore(&pool_lock, flags);

/*
 * Put the object back into the pool and schedule work to free objects
 * if necessary.
 */
static void free_object(struct debug_obj *obj)
	if (__free_object(obj))
		schedule_work(&debug_obj_work);
/*
 * We ran out of memory. That means we probably have tons of objects
 * allocated.
 */
static void debug_objects_oom(void)
	struct debug_bucket *db = obj_hash;
	struct hlist_node *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj *obj;

	pr_warn("Out of memory. ODEBUG disabled\n");

	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_move_list(&db->list, &freelist);
		raw_spin_unlock_irqrestore(&db->lock, flags);

		hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
			hlist_del(&obj->node);
/*
 * We use the pfn of the address for the hash. That way we can check
 * for freed objects simply by checking the affected bucket.
 */
static struct debug_bucket *get_bucket(unsigned long addr)
	hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
	return &obj_hash[hash];
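/*
 * Worked example (editorial note, not part of the original source): with
 * ODEBUG_CHUNK_SHIFT == PAGE_SHIFT and 4 KiB pages, the addresses
 * 0xffff888012345678 and 0xffff888012345abc both yield the chunk index
 * 0xffff888012345 and therefore hash into the same bucket, while
 * 0xffff888012346000 belongs to the next chunk and may select a different
 * bucket. The example addresses are made up purely for illustration.
 */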
static void debug_print_object(struct debug_obj *obj, char *msg)
	struct debug_obj_descr *descr = obj->descr;

	if (limit < 5 && descr != descr_test) {
		void *hint = descr->debug_hint ?
			descr->debug_hint(obj->object) : NULL;
		WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
				 "object type: %s hint: %pS\n",
			msg, obj_states[obj->state], obj->astate,
	debug_objects_warnings++;
/*
 * Try to repair the damage, so we have a better chance to get useful
 * debug output.
 */
debug_object_fixup(bool (*fixup)(void *addr, enum debug_obj_state state),
		   void *addr, enum debug_obj_state state)
	if (fixup && fixup(addr, state)) {
		debug_objects_fixups++;

static void debug_object_is_on_stack(void *addr, int onstack)
	is_on_stack = object_is_on_stack(addr);
	if (is_on_stack == onstack)

	pr_warn("object %p is on stack %p, but NOT annotated.\n", addr,
		task_stack_page(current));

	pr_warn("object %p is NOT on stack %p, but annotated.\n", addr,
		task_stack_page(current));
__debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	bool check_object_on_stack = false;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	obj = alloc_object(addr, db, descr);
	debug_objects_enabled = 0;
	raw_spin_unlock_irqrestore(&db->lock, flags);
	check_object_on_stack = true;

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_INIT;

	case ODEBUG_STATE_ACTIVE:
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_print_object(obj, "init");
		debug_object_fixup(descr->fixup_init, addr, state);

	case ODEBUG_STATE_DESTROYED:
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_print_object(obj, "init");

	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (check_object_on_stack)
		debug_object_is_on_stack(addr, onstack);
/**
 * debug_object_init - debug checks when an object is initialized
 * @addr: address of the object
 * @descr: pointer to an object specific debug description structure
 */
void debug_object_init(void *addr, struct debug_obj_descr *descr)
	if (!debug_objects_enabled)

	__debug_object_init(addr, descr, 0);
EXPORT_SYMBOL_GPL(debug_object_init);
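/*
 * Illustrative usage sketch (not taken from this file): a subsystem that
 * wants its objects tracked provides a struct debug_obj_descr and calls the
 * debug_object_*() hooks from its own lifetime functions, e.g.:
 *
 *	static struct debug_obj_descr foo_debug_descr = {
 *		.name = "foo",
 *	};
 *
 *	void foo_init(struct foo *f)
 *	{
 *		debug_object_init(f, &foo_debug_descr);
 *		...
 *	}
 *
 * "struct foo", "foo_debug_descr" and "foo_init()" are hypothetical names
 * used only for illustration.
 */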
/**
 * debug_object_init_on_stack - debug checks when an object on stack is
 *				initialized
 * @addr: address of the object
 * @descr: pointer to an object specific debug description structure
 */
void debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr)
	if (!debug_objects_enabled)

	__debug_object_init(addr, descr, 1);
EXPORT_SYMBOL_GPL(debug_object_init_on_stack);
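/*
 * Illustrative note (editorial): a stack allocated object is expected to be
 * taken out of tracking again, e.g. with debug_object_free(), before the
 * owning stack frame is left; a hypothetical pairing could look like:
 *
 *	void foo_do_on_stack(void)
 *	{
 *		struct foo f;
 *
 *		debug_object_init_on_stack(&f, &foo_debug_descr);
 *		...
 *		debug_object_free(&f, &foo_debug_descr);
 *	}
 *
 * (same made-up "foo" naming as in the sketch above.)
 */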
/**
 * debug_object_activate - debug checks when an object is activated
 * @addr: address of the object
 * @descr: pointer to an object specific debug description structure
 * Returns 0 for success, -EINVAL if the check failed.
 */
int debug_object_activate(void *addr, struct debug_obj_descr *descr)
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;

	struct debug_obj o = { .object = addr,
			       .state = ODEBUG_STATE_NOTAVAILABLE,

	if (!debug_objects_enabled)

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);

	switch (obj->state) {
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_ACTIVE;

	case ODEBUG_STATE_ACTIVE:
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_print_object(obj, "activate");
		ret = debug_object_fixup(descr->fixup_activate, addr, state);
		return ret ? 0 : -EINVAL;

	case ODEBUG_STATE_DESTROYED:

	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(obj, "activate");

	raw_spin_unlock_irqrestore(&db->lock, flags);

	/*
	 * This happens when a static object is activated. We let the
	 * type specific code confirm whether this is really a static
	 * object or not. If true, we just make sure that the static
	 * object is tracked in the object tracker. If not, this must
	 * be a bug, so we try to fix it up.
	 */
	if (descr->is_static_object && descr->is_static_object(addr)) {
		/* track this static object */
		debug_object_init(addr, descr);
		debug_object_activate(addr, descr);
		debug_print_object(&o, "activate");
		ret = debug_object_fixup(descr->fixup_activate, addr,
					 ODEBUG_STATE_NOTAVAILABLE);
		return ret ? 0 : -EINVAL;
EXPORT_SYMBOL_GPL(debug_object_activate);
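/*
 * Illustrative sketch (not from this file): callers that care about the
 * outcome check the return value, e.g.:
 *
 *	if (debug_object_activate(f, &foo_debug_descr))
 *		pr_err("foo: activation check failed for %p\n", f);
 *
 * 0 means the activation was consistent; -EINVAL means the check failed
 * and could not be fixed up. (hypothetical "foo" naming as above.)
 */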
/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr: address of the object
 * @descr: pointer to an object specific debug description structure
 */
void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
	struct debug_bucket *db;
	struct debug_obj *obj;

	if (!debug_objects_enabled)

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);

	switch (obj->state) {
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
	case ODEBUG_STATE_ACTIVE:
		obj->state = ODEBUG_STATE_INACTIVE;
		goto out_unlock_print;

	case ODEBUG_STATE_DESTROYED:
		goto out_unlock_print;

	raw_spin_unlock_irqrestore(&db->lock, flags);

	struct debug_obj o = { .object = addr,
			       .state = ODEBUG_STATE_NOTAVAILABLE,

	debug_print_object(&o, "deactivate");

	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(obj, "deactivate");
EXPORT_SYMBOL_GPL(debug_object_deactivate);
/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr: address of the object
 * @descr: pointer to an object specific debug description structure
 */
void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;

	if (!debug_objects_enabled)

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_DESTROYED;

	case ODEBUG_STATE_ACTIVE:
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_print_object(obj, "destroy");
		debug_object_fixup(descr->fixup_destroy, addr, state);

	case ODEBUG_STATE_DESTROYED:
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_print_object(obj, "destroy");

	raw_spin_unlock_irqrestore(&db->lock, flags);
EXPORT_SYMBOL_GPL(debug_object_destroy);
/**
 * debug_object_free - debug checks when an object is freed
 * @addr: address of the object
 * @descr: pointer to an object specific debug description structure
 */
void debug_object_free(void *addr, struct debug_obj_descr *descr)
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;

	if (!debug_objects_enabled)

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_print_object(obj, "free");
		debug_object_fixup(descr->fixup_free, addr, state);

		hlist_del(&obj->node);
		raw_spin_unlock_irqrestore(&db->lock, flags);

	raw_spin_unlock_irqrestore(&db->lock, flags);
EXPORT_SYMBOL_GPL(debug_object_free);
/**
 * debug_object_assert_init - debug checks when object should be init-ed
 * @addr: address of the object
 * @descr: pointer to an object specific debug description structure
 */
void debug_object_assert_init(void *addr, struct debug_obj_descr *descr)
	struct debug_bucket *db;
	struct debug_obj *obj;

	if (!debug_objects_enabled)

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);

	struct debug_obj o = { .object = addr,
			       .state = ODEBUG_STATE_NOTAVAILABLE,

	raw_spin_unlock_irqrestore(&db->lock, flags);
	/*
	 * Maybe the object is static, and we let the type specific
	 * code confirm. Track this static object if true, else invoke
	 * the fixup.
	 */
	if (descr->is_static_object && descr->is_static_object(addr)) {
		/* Track this static object */
		debug_object_init(addr, descr);
		debug_print_object(&o, "assert_init");
		debug_object_fixup(descr->fixup_assert_init, addr,
				   ODEBUG_STATE_NOTAVAILABLE);

	raw_spin_unlock_irqrestore(&db->lock, flags);
EXPORT_SYMBOL_GPL(debug_object_assert_init);
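/*
 * Illustrative sketch (not from this file): a path that merely requires the
 * object to have been initialized, without changing its tracked state, can
 * assert that, e.g.:
 *
 *	void foo_cancel(struct foo *f)
 *	{
 *		debug_object_assert_init(f, &foo_debug_descr);
 *		...
 *	}
 *
 * (hypothetical "foo" naming as above.)
 */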
/**
 * debug_object_active_state - debug checks object usage state machine
 * @addr: address of the object
 * @descr: pointer to an object specific debug description structure
 * @expect: expected state
 * @next: state to move to if expected state is found
 */
debug_object_active_state(void *addr, struct debug_obj_descr *descr,
			  unsigned int expect, unsigned int next)
	struct debug_bucket *db;
	struct debug_obj *obj;

	if (!debug_objects_enabled)

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
		if (obj->astate == expect)
		goto out_unlock_print;

		goto out_unlock_print;

	raw_spin_unlock_irqrestore(&db->lock, flags);

	struct debug_obj o = { .object = addr,
			       .state = ODEBUG_STATE_NOTAVAILABLE,

	debug_print_object(&o, "active_state");

	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(obj, "active_state");
EXPORT_SYMBOL_GPL(debug_object_active_state);
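/*
 * Illustrative sketch (not from this file): a user that maintains an extra
 * substate in obj->astate could advance it like this:
 *
 *	enum { FOO_IDLE, FOO_QUEUED };	(hypothetical substates)
 *
 *	debug_object_active_state(f, &foo_debug_descr, FOO_IDLE, FOO_QUEUED);
 *
 * If the tracked object is ODEBUG_STATE_ACTIVE and its astate equals
 * FOO_IDLE, the astate is advanced to FOO_QUEUED; otherwise a warning is
 * emitted. (The "foo" naming is made up, as in the sketches above.)
 */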
#ifdef CONFIG_DEBUG_OBJECTS_FREE
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
	struct debug_obj_descr *descr;
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct hlist_node *tmp;
	struct debug_obj *obj;
	int cnt, objs_checked = 0;

	saddr = (unsigned long) address;
	eaddr = saddr + size;
	paddr = saddr & ODEBUG_CHUNK_MASK;
	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
	chunks >>= ODEBUG_CHUNK_SHIFT;

	for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
		db = get_bucket(paddr);

		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
			oaddr = (unsigned long) obj->object;
			if (oaddr < saddr || oaddr >= eaddr)

			switch (obj->state) {
			case ODEBUG_STATE_ACTIVE:
				raw_spin_unlock_irqrestore(&db->lock, flags);
				debug_print_object(obj, "free");
				debug_object_fixup(descr->fixup_free,
						   (void *) oaddr, state);

				hlist_del(&obj->node);
				work |= __free_object(obj);

		raw_spin_unlock_irqrestore(&db->lock, flags);

		if (cnt > debug_objects_maxchain)
			debug_objects_maxchain = cnt;

	if (objs_checked > debug_objects_maxchecked)
		debug_objects_maxchecked = objs_checked;

	/* Schedule work to actually kmem_cache_free() objects */
	schedule_work(&debug_obj_work);

void debug_check_no_obj_freed(const void *address, unsigned long size)
	if (debug_objects_enabled)
		__debug_check_no_obj_freed(address, size);
#ifdef CONFIG_DEBUG_FS

static int debug_stats_show(struct seq_file *m, void *v)
	seq_printf(m, "max_chain :%d\n", debug_objects_maxchain);
	seq_printf(m, "max_checked :%d\n", debug_objects_maxchecked);
	seq_printf(m, "warnings :%d\n", debug_objects_warnings);
	seq_printf(m, "fixups :%d\n", debug_objects_fixups);
	seq_printf(m, "pool_free :%d\n", obj_pool_free);
	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
	seq_printf(m, "pool_used :%d\n", obj_pool_used);
	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
	seq_printf(m, "on_free_list :%d\n", obj_nr_tofree);
	seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
	seq_printf(m, "objs_freed :%d\n", debug_objects_freed);

static int debug_stats_open(struct inode *inode, struct file *filp)
	return single_open(filp, debug_stats_show, NULL);

static const struct file_operations debug_stats_fops = {
	.open = debug_stats_open,
	.release = single_release,

static int __init debug_objects_init_debugfs(void)
	struct dentry *dbgdir, *dbgstats;

	if (!debug_objects_enabled)

	dbgdir = debugfs_create_dir("debug_objects", NULL);

	dbgstats = debugfs_create_file("stats", 0444, dbgdir, NULL,

	debugfs_remove(dbgdir);
__initcall(debug_objects_init_debugfs);
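/*
 * Illustrative usage (editorial note): assuming debugfs is mounted at
 * /sys/kernel/debug, the counters can be read with:
 *
 *	# cat /sys/kernel/debug/debug_objects/stats
 *	max_chain :...
 *	warnings :...
 *	pool_free :...
 *
 * The lines correspond one to one to the seq_printf() calls in
 * debug_stats_show() above; the values are elided here.
 */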
static inline void debug_objects_init_debugfs(void) { }

#ifdef CONFIG_DEBUG_OBJECTS_SELFTEST

/* Random data structure for the self test */
	unsigned long dummy1[6];
	unsigned long dummy2[3];

static __initdata struct debug_obj_descr descr_type_test;

static bool __init is_static_object(void *addr)
	struct self_test *obj = addr;

	return obj->static_init;
/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static bool __init fixup_init(void *addr, enum debug_obj_state state)
	struct self_test *obj = addr;

	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_init(obj, &descr_type_test);

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown non-static object is activated
 */
static bool __init fixup_activate(void *addr, enum debug_obj_state state)
	struct self_test *obj = addr;

	case ODEBUG_STATE_NOTAVAILABLE:
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_activate(obj, &descr_type_test);

/*
 * fixup_destroy is called when:
 * - an active object is destroyed
 */
static bool __init fixup_destroy(void *addr, enum debug_obj_state state)
	struct self_test *obj = addr;

	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_destroy(obj, &descr_type_test);

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static bool __init fixup_free(void *addr, enum debug_obj_state state)
	struct self_test *obj = addr;

	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_free(obj, &descr_type_test);
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
	struct debug_bucket *db;
	struct debug_obj *obj;

	enum debug_obj_state obj_state;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	obj_state = obj ? obj->state : state;

	raw_spin_unlock_irqrestore(&db->lock, flags);

	if (!obj && state != ODEBUG_STATE_NONE) {
		WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
	if (obj_state != state) {
		WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
	if (fixups != debug_objects_fixups) {
		WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
		     fixups, debug_objects_fixups);
	if (warnings != debug_objects_warnings) {
		WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
		     warnings, debug_objects_warnings);

	debug_objects_enabled = 0;

static __initdata struct debug_obj_descr descr_type_test = {
	.is_static_object = is_static_object,
	.fixup_init = fixup_init,
	.fixup_activate = fixup_activate,
	.fixup_destroy = fixup_destroy,
	.fixup_free = fixup_free,

static __initdata struct self_test obj = { .static_init = 0 };
static void __init debug_objects_selftest(void)
	int fixups, oldfixups, warnings, oldwarnings;
	unsigned long flags;

	local_irq_save(flags);

	fixups = oldfixups = debug_objects_fixups;
	warnings = oldwarnings = debug_objects_warnings;
	descr_test = &descr_type_test;

	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
	debug_object_destroy(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))

	obj.static_init = 1;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))

#ifdef CONFIG_DEBUG_OBJECTS_FREE
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
	__debug_check_no_obj_freed(&obj, sizeof(obj));
	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))

	pr_info("selftest passed\n");

	debug_objects_fixups = oldfixups;
	debug_objects_warnings = oldwarnings;

	local_irq_restore(flags);
static inline void debug_objects_selftest(void) { }

/*
 * Called during early boot to initialize the hash buckets and link
 * the static object pool objects into the pool list. After this call
 * the object tracker is fully operational.
 */
void __init debug_objects_early_init(void)
	for (i = 0; i < ODEBUG_HASH_SIZE; i++)
		raw_spin_lock_init(&obj_hash[i].lock);

	for (i = 0; i < ODEBUG_POOL_SIZE; i++)
		hlist_add_head(&obj_static_pool[i].node, &obj_pool);
/*
 * Convert the statically allocated objects to dynamic ones:
 */
static int __init debug_objects_replace_static_objects(void)
	struct debug_bucket *db = obj_hash;
	struct hlist_node *tmp;
	struct debug_obj *obj, *new;
	HLIST_HEAD(objects);

	for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
		obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
		hlist_add_head(&obj->node, &objects);

	/*
	 * debug_objects_mem_init() is now called early, when only one CPU
	 * is up and interrupts have been disabled, so it is safe to replace
	 * the active object references.
	 */

	/* Remove the statically allocated objects from the pool */
	hlist_for_each_entry_safe(obj, tmp, &obj_pool, node)
		hlist_del(&obj->node);
	/* Move the allocated objects to the pool */
	hlist_move_list(&objects, &obj_pool);

	/* Replace the active object references */
	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		hlist_move_list(&db->list, &objects);

		hlist_for_each_entry(obj, &objects, node) {
			new = hlist_entry(obj_pool.first, typeof(*obj), node);
			hlist_del(&new->node);
			/* copy object data */
			hlist_add_head(&new->node, &db->list);

	pr_debug("%d of %d active objects replaced\n",
		 cnt, obj_pool_used);

	hlist_for_each_entry_safe(obj, tmp, &objects, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
/*
 * Called after the kmem_caches are functional to set up a dedicated
 * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
 * prevents the debug code from being called on kmem_cache_free() for
 * the debug tracker objects themselves, which would lead to recursive
 * calls.
 */
void __init debug_objects_mem_init(void)
	if (!debug_objects_enabled)

	obj_cache = kmem_cache_create("debug_objects_cache",
				      sizeof (struct debug_obj), 0,
				      SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE,

	if (!obj_cache || debug_objects_replace_static_objects()) {
		debug_objects_enabled = 0;
		kmem_cache_destroy(obj_cache);
		pr_warn("out of memory.\n");

	debug_objects_selftest();

	/*
	 * Increase the thresholds for allocating and freeing objects
	 * according to the number of possible CPUs available in the system.
	 */
	debug_objects_pool_size += num_possible_cpus() * 32;
	debug_objects_pool_min_level += num_possible_cpus() * 4;
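/*
 * Worked example (editorial note): on a system with 8 possible CPUs the
 * pool target grows from its ODEBUG_POOL_SIZE based default (1024) to
 * 1024 + 8 * 32 = 1280 objects, and the refill threshold from
 * ODEBUG_POOL_MIN_LEVEL (256) to 256 + 8 * 4 = 288 objects.
 */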