linux-2.4.git: include/asm-x86_64/smplock.h (import of upstream 2.4.34.4 from kernel.org)
/*
 * <asm/smplock.h>
 */
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <asm/current.h>

extern spinlock_cacheline_t kernel_flag_cacheline;
#define kernel_flag kernel_flag_cacheline.lock
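/*
 * For illustration only (a sketch, not part of this header): the lock
 * sits alone in a cache-line-aligned wrapper so that BKL cache-line
 * bouncing does not false-share with neighbouring data. Roughly:
 *
 *	typedef struct {
 *		spinlock_t lock;
 *	} __attribute__((__aligned__(SMP_CACHE_BYTES))) spinlock_cacheline_t;
 *
 *	spinlock_cacheline_t kernel_flag_cacheline = {SPIN_LOCK_UNLOCKED};
 *
 * The exact definitions live in <linux/spinlock.h> and the arch SMP code.
 */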

#define kernel_locked()		spin_is_locked(&kernel_flag)

/*
 * Release the global kernel lock and the global interrupt lock
 * (task->lock_depth >= 0 means the task currently holds the BKL)
 */
#define release_kernel_lock(task, cpu) \
do { \
	if (task->lock_depth >= 0) \
		spin_unlock(&kernel_flag); \
	release_irqlock(cpu); \
	__sti(); \
} while (0)

/*
 * Re-acquire the kernel lock if this task held it when it was scheduled out
 */
#define reacquire_kernel_lock(task) \
do { \
	if (task->lock_depth >= 0) \
		spin_lock(&kernel_flag); \
} while (0)
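
/*
 * Usage sketch (hypothetical, condensed from how the 2.4 scheduler
 * uses these macros): the BKL is dropped across a context switch so a
 * task that sleeps while holding it cannot deadlock other CPUs, then
 * re-taken on the way back in. lock_depth is left untouched, which is
 * how reacquire_kernel_lock() knows whether to re-lock:
 *
 *	release_kernel_lock(prev, this_cpu);	-- drop BKL, keep lock_depth
 *	...pick next runnable task, switch_to()...
 *	reacquire_kernel_lock(current);		-- re-take BKL if we held it
 */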


/*
 * Getting the big kernel lock.
 *
 * This cannot happen asynchronously, so we only need to worry
 * about other CPUs.
 */
extern __inline__ void lock_kernel(void)
{
#if 1
	/* lock_depth starts at -1, so only the outermost lock_kernel()
	   takes the spinlock; nested calls just bump the depth */
	if (!++current->lock_depth)
		spin_lock(&kernel_flag);
#else
	__asm__ __volatile__(
		"incl %1\n\t"
		"jne 9f"
		spin_lock_string
		"\n9:"
		:"=m" (__dummy_lock(&kernel_flag)),
		 "=m" (current->lock_depth));
#endif
}

extern __inline__ void unlock_kernel(void)
{
	/* calling unlock_kernel() without holding the BKL is a bug */
	if (current->lock_depth < 0)
		out_of_line_bug();
#if 1
	if (--current->lock_depth < 0)
		spin_unlock(&kernel_flag);
#else
	__asm__ __volatile__(
		"decl %1\n\t"
		"jns 9f\n\t"
		spin_unlock_string
		"\n9:"
		:"=m" (__dummy_lock(&kernel_flag)),
		 "=m" (current->lock_depth));
#endif
}
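
/*
 * Recursion sketch (hypothetical callers, for illustration): every task
 * starts with lock_depth == -1, so nested lock_kernel()/unlock_kernel()
 * pairs are safe and only the outermost pair touches kernel_flag:
 *
 *	void outer(void)
 *	{
 *		lock_kernel();		-- depth -1 -> 0: takes kernel_flag
 *		inner();
 *		unlock_kernel();	-- depth 0 -> -1: drops kernel_flag
 *	}
 *
 *	void inner(void)
 *	{
 *		lock_kernel();		-- depth 0 -> 1: spinlock untouched
 *		unlock_kernel();	-- depth 1 -> 0: still held by outer()
 *	}
 */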