#ifndef __ARCH_S390_ATOMIC__
#define __ARCH_S390_ATOMIC__

/*
 *  include/asm-s390/atomic.h
 *
 *  S390 version
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *               Denis Joseph Barrow
 *
 *  Derived from "include/asm-i386/bitops.h"
 *    Copyright (C) 1992, Linus Torvalds
 *
 */

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 * S390 uses 'Compare And Swap' for atomicity in an SMP environment
 * (see the illustrative plain-C sketch below __CS_LOOP).
 */

typedef struct { volatile int counter; } __attribute__ ((aligned (4))) atomic_t;
#define ATOMIC_INIT(i)  { (i) }

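/*
 * BCR 15,0 is a no-op branch that serializes the CPU on S390, so this
 * acts as a full memory barrier (the name mirrors the PowerPC "eieio").
 */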
#define atomic_eieio()          __asm__ __volatile__ ("BCR 15,0")

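/*
 * __CS_LOOP loads the current counter value, applies op_string
 * (ar/sr/nr/or) to compute the new value, and uses Compare And Swap
 * (cs) to store it; cs reloads old_val and sets the condition code
 * if another CPU modified the counter in between, and jl then retries.
 */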
#define __CS_LOOP(old_val, new_val, ptr, op_val, op_string)             \
        __asm__ __volatile__("   l     %0,0(%2)\n"                      \
                             "0: lr    %1,%0\n"                         \
                             op_string "  %1,%3\n"                      \
                             "   cs    %0,%1,0(%2)\n"                   \
                             "   jl    0b"                              \
                             : "=&d" (old_val), "=&d" (new_val)         \
                             : "a" (ptr), "d" (op_val) : "cc" );

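/*
 * A minimal sketch of the same retry loop in plain C, using the
 * atomic_compare_and_swap() primitive defined further down.  Purely
 * illustrative and compiled out; the real operations go through the
 * __CS_LOOP inline assembly above.
 */
#if 0
static __inline__ int example_add_return(int i, atomic_t *v)
{
        int old_val, new_val;

        do {
                old_val = atomic_read(v);       /* sample current value  */
                new_val = old_val + i;          /* compute updated value */
                /* retry if another CPU changed *v in the meantime */
        } while (atomic_compare_and_swap(old_val, new_val, v) != 0);
        return new_val;
}
#endif
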
#define atomic_read(v)          ((v)->counter)
#define atomic_set(v,i)         (((v)->counter) = (i))

static __inline__ void atomic_add(int i, atomic_t *v)
{
        int old_val, new_val;
        __CS_LOOP(old_val, new_val, v, i, "ar");
}

static __inline__ int atomic_add_return (int i, atomic_t *v)
{
        int old_val, new_val;
        __CS_LOOP(old_val, new_val, v, i, "ar");
        return new_val;
}

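/*
 * Illustrative use (compiled out): atomic_add_return() as a simple
 * unique-id allocator; the names below are examples only.
 */
#if 0
static atomic_t example_next_id = ATOMIC_INIT(0);

static int example_alloc_id(void)
{
        /* each caller gets a distinct value, even on SMP */
        return atomic_add_return(1, &example_next_id);
}
#endif
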
static __inline__ int atomic_add_negative(int i, atomic_t *v)
{
        int old_val, new_val;
        __CS_LOOP(old_val, new_val, v, i, "ar");
        return new_val < 0;
}

static __inline__ void atomic_sub(int i, atomic_t *v)
{
        int old_val, new_val;
        __CS_LOOP(old_val, new_val, v, i, "sr");
}

static __inline__ void atomic_inc(volatile atomic_t *v)
{
        int old_val, new_val;
        __CS_LOOP(old_val, new_val, v, 1, "ar");
}

static __inline__ int atomic_inc_return(volatile atomic_t *v)
{
        int old_val, new_val;
        __CS_LOOP(old_val, new_val, v, 1, "ar");
        return new_val;
}

/* note: returns true when the incremented value is non-zero */
static __inline__ int atomic_inc_and_test(volatile atomic_t *v)
{
        int old_val, new_val;
        __CS_LOOP(old_val, new_val, v, 1, "ar");
        return new_val != 0;
}

static __inline__ void atomic_dec(volatile atomic_t *v)
{
        int old_val, new_val;
        __CS_LOOP(old_val, new_val, v, 1, "sr");
}

static __inline__ int atomic_dec_return(volatile atomic_t *v)
{
        int old_val, new_val;
        __CS_LOOP(old_val, new_val, v, 1, "sr");
        return new_val;
}

/* note: returns true when the decremented value is zero */
static __inline__ int atomic_dec_and_test(volatile atomic_t *v)
{
        int old_val, new_val;
        __CS_LOOP(old_val, new_val, v, 1, "sr");
        return new_val == 0;
}

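/*
 * Illustrative use (compiled out): the classic reference-count pattern
 * built from atomic_inc()/atomic_dec_and_test(); names are examples only.
 */
#if 0
static atomic_t example_refcount = ATOMIC_INIT(1);

static void example_get(void)
{
        atomic_inc(&example_refcount);
}

static void example_put(void)
{
        if (atomic_dec_and_test(&example_refcount)) {
                /* last reference dropped; free the object here */
        }
}
#endif
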
/* atomically clear the bits set in mask: *v &= ~mask */
static __inline__ void atomic_clear_mask(unsigned long mask, atomic_t *v)
{
        int old_val, new_val;
        __CS_LOOP(old_val, new_val, v, ~mask, "nr");
}

/* atomically set the bits set in mask: *v |= mask */
static __inline__ void atomic_set_mask(unsigned long mask, atomic_t *v)
{
        int old_val, new_val;
        __CS_LOOP(old_val, new_val, v, mask, "or");
}

/*
 * Returns 0 if expected_oldval == *v (the swap was performed),
 * 1 if it was unsuccessful.
 */
static __inline__ int
atomic_compare_and_swap(int expected_oldval, int new_val, atomic_t *v)
{
        int retval;

        __asm__ __volatile__(
                "  lr   0,%2\n"        /* r0 = expected_oldval          */
                "  cs   0,%3,0(%1)\n"  /* swap in new_val if *v == r0   */
                "  ipm  %0\n"          /* fetch the condition code ...  */
                "  srl  %0,28\n"       /* ... and shift it to bit 0     */
                "0:"
                : "=&d" (retval)
                : "a" (v), "d" (expected_oldval) , "d" (new_val)
                : "0", "cc");
        return retval;
}

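/*
 * Illustrative use (compiled out): claim a one-shot flag exactly once
 * across CPUs; names are examples only.
 */
#if 0
static atomic_t example_flag = ATOMIC_INIT(0);

static int example_claim(void)
{
        /* true only for the caller that flips the flag from 0 to 1 */
        return atomic_compare_and_swap(0, 1, &example_flag) == 0;
}
#endif
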
/*
 * Spin until *v == expected_oldval, then swap in new_val.
 */
static __inline__ void
atomic_compare_and_swap_spin(int expected_oldval, int new_val, atomic_t *v)
{
        __asm__ __volatile__(
                "0: lr  0,%1\n"        /* r0 = expected_oldval        */
                "   cs  0,%2,0(%0)\n"  /* try to swap in new_val      */
                "   jl  0b\n"          /* mismatch: spin and retry    */
                : : "a" (v), "d" (expected_oldval) , "d" (new_val)
                : "cc", "0" );
}

/* warn (via printk(), from <linux/kernel.h>) and force the value if the
   counter was not in the expected state */
#define atomic_compare_and_swap_debug(where,from,to) \
do { \
        if (atomic_compare_and_swap ((from), (to), (where))) { \
                printk (KERN_WARNING"%s/%d atomic counter:%s couldn't be changed from %d(%s) to %d(%s), was %d\n",\
                        __FILE__,__LINE__,#where,(from),#from,(to),#to,atomic_read (where));\
                atomic_set(where,(to)); \
        } \
} while (0)

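/*
 * Illustrative use (compiled out): insist that a state counter moves
 * from 1 to 0 and warn if it was in any other state; names are examples.
 */
#if 0
static atomic_t example_state = ATOMIC_INIT(1);

static void example_shutdown(void)
{
        atomic_compare_and_swap_debug(&example_state, 1, 0);
}
#endif
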
#define smp_mb__before_atomic_dec()     smp_mb()
#define smp_mb__after_atomic_dec()      smp_mb()
#define smp_mb__before_atomic_inc()     smp_mb()
#define smp_mb__after_atomic_inc()      smp_mb()

#endif                                 /* __ARCH_S390_ATOMIC__ */