patch-2.1.34 linux/include/asm-alpha/atomic.h

Next file: linux/include/asm-alpha/checksum.h
Previous file: linux/fs/super.c
Back to the patch index
Back to the overall index

diff -u --recursive --new-file v2.1.33/linux/include/asm-alpha/atomic.h linux/include/asm-alpha/atomic.h
@@ -9,7 +9,16 @@
  * than regular operations.
  */
 
-typedef int atomic_t;
+#ifdef __SMP__
+typedef struct { volatile int counter; } atomic_t;
+#else
+typedef struct { int counter; } atomic_t;
+#endif
+
+#define ATOMIC_INIT	{ 0 }
+
+#define atomic_read(v)		((v)->counter)
+#define atomic_set(v,i)		(((v)->counter) = (i))
 
 /*
  * Make sure gcc doesn't try to be clever and move things around
@@ -24,7 +33,7 @@
  * branch back to restart the operation.
  */
 
-extern __inline__ void atomic_add(atomic_t i, atomic_t * v)
+extern __inline__ void atomic_add(int i, atomic_t * v)
 {
 	unsigned long temp;
 	__asm__ __volatile__(
@@ -39,7 +48,7 @@
 	:"Ir" (i), "m" (__atomic_fool_gcc(v)));
 }
 
-extern __inline__ void atomic_sub(atomic_t i, atomic_t * v)
+extern __inline__ void atomic_sub(int i, atomic_t * v)
 {
 	unsigned long temp;
 	__asm__ __volatile__(
@@ -57,7 +66,7 @@
 /*
  * Same as above, but return the result value
  */
-extern __inline__ long atomic_add_return(atomic_t i, atomic_t * v)
+extern __inline__ long atomic_add_return(int i, atomic_t * v)
 {
 	long temp, result;
 	__asm__ __volatile__(
@@ -74,7 +83,7 @@
 	return result;
 }
 
-extern __inline__ long atomic_sub_return(atomic_t i, atomic_t * v)
+extern __inline__ long atomic_sub_return(int i, atomic_t * v)
 {
 	long temp, result;
 	__asm__ __volatile__(

FUNET's LINUX-ADM group, linux-adm@nic.funet.fi
TCL-scripts by Sam Shen, slshen@lbl.gov