/*
 * Default SMP lock implementation
 */
6 #include <linux/interrupt.h>
7 #include <linux/spinlock.h>
8 #include <linux/sched.h>
10 #include <asm/current.h>
11 #include <asm/hardirq.h>
13 extern spinlock_t kernel_flag;
15 #define kernel_locked() spin_is_locked(&kernel_flag)
18 * Release global kernel lock and global interrupt lock
20 static __inline__ void
21 release_kernel_lock(struct task_struct *task, int cpu)
23 if (task->lock_depth >= 0)
24 spin_unlock(&kernel_flag);
30 * Re-acquire the kernel lock
32 static __inline__ void
33 reacquire_kernel_lock(struct task_struct *task)
35 if (task->lock_depth >= 0)
36 spin_lock(&kernel_flag);
40 * Getting the big kernel lock.
42 * This cannot happen asynchronously,
43 * so we only need to worry about other
46 static __inline__ void
49 if (!++current->lock_depth)
50 spin_lock(&kernel_flag);
53 static __inline__ void
56 if (--current->lock_depth < 0)
57 spin_unlock(&kernel_flag);