/*
 * Default SMP lock implementation
 */
7 #ifndef __ASM_SMPLOCK_H__
8 #define __ASM_SMPLOCK_H__
10 #include <linux/interrupt.h>
11 #include <linux/spinlock.h>
13 extern spinlock_t kernel_flag;
15 #define kernel_locked() spin_is_locked(&kernel_flag)
/*
 * Release global kernel lock and global interrupt lock
 */
/*
 * Drop the BKL (if this task holds it, i.e. lock_depth >= 0) and the
 * global interrupt lock.  Wrapped in do { } while (0) so the two
 * statements stay together when used as the body of an unbraced
 * if/else — without it, release_irqlock() would escape the `if`.
 */
#define release_kernel_lock(task, cpu) \
do { \
	if (task->lock_depth >= 0) \
		spin_unlock(&kernel_flag); \
	release_irqlock(cpu); \
} while (0)
/*
 * Re-acquire the kernel lock
 */
/*
 * Re-take the BKL if this task logically holds it (lock_depth >= 0),
 * e.g. after being scheduled back in.  do { } while (0) keeps the
 * macro safe as a single statement in if/else contexts.
 */
#define reacquire_kernel_lock(task) \
do { \
	if (task->lock_depth >= 0) \
		spin_lock(&kernel_flag); \
} while (0)
/*
 * Getting the big kernel lock.
 *
 * This cannot happen asynchronously,
 * so we only need to worry about other
 * CPUs.
 */
45 static __inline__ void lock_kernel(void)
47 if (!++current->lock_depth)
48 spin_lock(&kernel_flag);
51 static __inline__ void unlock_kernel(void)
53 if (--current->lock_depth < 0)
54 spin_unlock(&kernel_flag);
56 #endif /* __ASM_SMPLOCK_H__ */
57 #endif /* __KERNEL__ */