/*
 * Default SMP lock implementation
 */
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>

extern spinlock_t kernel_flag;
/* Nonzero iff some task currently holds the big kernel lock. */
#define kernel_locked()		spin_is_locked(&kernel_flag)
16 * Release global kernel lock and global interrupt lock
18 static __inline__ void release_kernel_lock(struct task_struct *task, int cpu)
20 if (task->lock_depth >= 0)
21 spin_unlock(&kernel_flag);
27 * Re-acquire the kernel lock
29 static __inline__ void reacquire_kernel_lock(struct task_struct *task)
31 if (task->lock_depth >= 0)
32 spin_lock(&kernel_flag);
36 * Getting the big kernel lock.
38 * This cannot happen asynchronously,
39 * so we only need to worry about other
42 static __inline__ void lock_kernel(void)
44 if (!++current->lock_depth)
45 spin_lock(&kernel_flag);
48 static __inline__ void unlock_kernel(void)
50 if (--current->lock_depth < 0)
51 spin_unlock(&kernel_flag);