/*
 * Default SMP lock implementation
 */
6 #ifndef __ASM_SMPLOCK_H
7 #define __ASM_SMPLOCK_H
9 #include <linux/sched.h>
10 #include <linux/interrupt.h>
11 #include <linux/spinlock.h>
13 extern spinlock_t kernel_flag;
15 #define kernel_locked() spin_is_locked(&kernel_flag)
18 * Release global kernel lock and global interrupt lock
20 static __inline__ void release_kernel_lock(struct task_struct *task, int cpu)
22 if (task->lock_depth >= 0)
23 spin_unlock(&kernel_flag);
29 * Re-acquire the kernel lock
31 static __inline__ void reacquire_kernel_lock(struct task_struct *task)
33 if (task->lock_depth >= 0)
34 spin_lock(&kernel_flag);
38 * Getting the big kernel lock.
40 * This cannot happen asynchronously,
41 * so we only need to worry about other
44 static __inline__ void lock_kernel(void)
46 if (!++current->lock_depth)
47 spin_lock(&kernel_flag);
50 static __inline__ void unlock_kernel(void)
52 if (--current->lock_depth < 0)
53 spin_unlock(&kernel_flag);
56 #endif /* __ASM_SMPLOCK_H */