#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <asm/current.h>
9 extern spinlock_cacheline_t kernel_flag_cacheline;
10 #define kernel_flag kernel_flag_cacheline.lock
12 #define kernel_locked() spin_is_locked(&kernel_flag)
/*
 * Release global kernel lock and global interrupt lock
 */
/*
 * Drop the BKL (if this task holds it) and the global interrupt lock,
 * then re-enable local interrupts.  Used around scheduling points.
 *
 * Wrapped in do { } while (0) so the multi-statement body stays a
 * single statement when used as an unbraced if/else body.
 *
 * NOTE(review): the __sti() tail was reconstructed from the header
 * comment ("...and global interrupt lock") — confirm against the
 * original tree.
 */
#define release_kernel_lock(task, cpu)		\
do {						\
	if (task->lock_depth >= 0)		\
		spin_unlock(&kernel_flag);	\
	release_irqlock(cpu);			\
	__sti();				\
} while (0)
/*
 * Re-acquire the kernel lock
 */
/*
 * Re-take the BKL if this task held it before release_kernel_lock().
 * lock_depth >= 0 means "this task owns (a recursion of) the BKL".
 *
 * do { } while (0) keeps the macro a single statement.
 */
#define reacquire_kernel_lock(task)		\
do {						\
	if (task->lock_depth >= 0)		\
		spin_lock(&kernel_flag);	\
} while (0)
/*
 * Getting the big kernel lock.
 *
 * This cannot happen asynchronously,
 * so we only need to worry about other
 * CPUs.
 */
42 extern __inline__ void lock_kernel(void)
45 if (!++current->lock_depth)
46 spin_lock(&kernel_flag);
53 :"=m" (__dummy_lock(&kernel_flag)),
54 "=m" (current->lock_depth));
58 extern __inline__ void unlock_kernel(void)
60 if (current->lock_depth < 0)
63 if (--current->lock_depth < 0)
64 spin_unlock(&kernel_flag);
71 :"=m" (__dummy_lock(&kernel_flag)),
72 "=m" (current->lock_depth));