patch-2.4.5 linux/include/asm-sparc64/semaphore.h
- Lines: 361
- Date: Sun May 20 11:32:08 2001
- Orig file: v2.4.4/linux/include/asm-sparc64/semaphore.h
- Orig date: Thu Apr 26 22:17:26 2001
diff -u --recursive --new-file v2.4.4/linux/include/asm-sparc64/semaphore.h linux/include/asm-sparc64/semaphore.h
@@ -1,52 +1,40 @@
#ifndef _SPARC64_SEMAPHORE_H
#define _SPARC64_SEMAPHORE_H
-/* These are actually reasonable on the V9. */
+/* These are actually reasonable on the V9.
+ *
+ * See asm-ppc/semaphore.h for implementation commentary,
+ * only sparc64 specific issues are commented here.
+ */
#ifdef __KERNEL__
#include <asm/atomic.h>
-#include <asm/bitops.h>
#include <asm/system.h>
#include <linux/wait.h>
#include <linux/rwsem.h>
struct semaphore {
atomic_t count;
- atomic_t waking;
wait_queue_head_t wait;
-#if WAITQUEUE_DEBUG
- long __magic;
-#endif
};
-#if WAITQUEUE_DEBUG
-# define __SEM_DEBUG_INIT(name) \
- , (long)&(name).__magic
-#else
-# define __SEM_DEBUG_INIT(name)
-#endif
-
-#define __SEMAPHORE_INITIALIZER(name,count) \
-{ ATOMIC_INIT(count), ATOMIC_INIT(0), __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
- __SEM_DEBUG_INIT(name) }
+#define __SEMAPHORE_INITIALIZER(name, count) \
+ { ATOMIC_INIT(count), \
+ __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) }
#define __MUTEX_INITIALIZER(name) \
- __SEMAPHORE_INITIALIZER(name,1)
+ __SEMAPHORE_INITIALIZER(name, 1)
-#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
+#define __DECLARE_SEMAPHORE_GENERIC(name, count) \
struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
-#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name,0)
+#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name, 1)
+#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name, 0)
-extern inline void sema_init (struct semaphore *sem, int val)
+static inline void sema_init (struct semaphore *sem, int val)
{
atomic_set(&sem->count, val);
- atomic_set(&sem->waking, 0);
init_waitqueue_head(&sem->wait);
-#if WAITQUEUE_DEBUG
- sem->__magic = (long)&sem->__magic;
-#endif
}
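For orientation, a hedged usage sketch of the initializer macros and sema_init() above; the names my_static_sem, my_dynamic_sem, and my_setup are illustrative, not part of the patch:

        /* Compile-time initialization: count starts at 1 (mutex mode). */
        static DECLARE_MUTEX(my_static_sem);

        /* Equivalent run-time initialization of an embedded semaphore. */
        static struct semaphore my_dynamic_sem;

        static void my_setup(void)
        {
                sema_init(&my_dynamic_sem, 1);
        }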
static inline void init_MUTEX (struct semaphore *sem)
@@ -61,143 +49,179 @@
extern void __down(struct semaphore * sem);
extern int __down_interruptible(struct semaphore * sem);
-extern int __down_trylock(struct semaphore * sem);
extern void __up(struct semaphore * sem);
-extern __inline__ void down(struct semaphore * sem)
+static __inline__ void down(struct semaphore * sem)
{
-#if WAITQUEUE_DEBUG
- CHECK_MAGIC(sem->__magic);
-#endif
- __asm__ __volatile__(
-" 1: lduw [%0], %%g5\n"
-" sub %%g5, 1, %%g7\n"
-" cas [%0], %%g5, %%g7\n"
-" cmp %%g5, %%g7\n"
-" bne,pn %%icc, 1b\n"
-" cmp %%g7, 1\n"
-" bl,pn %%icc, 3f\n"
-" membar #StoreStore\n"
-" 2:\n"
-" .subsection 2\n"
-" 3: mov %0, %%g5\n"
-" save %%sp, -160, %%sp\n"
-" mov %%g1, %%l1\n"
-" mov %%g2, %%l2\n"
-" mov %%g3, %%l3\n"
-" call %1\n"
-" mov %%g5, %%o0\n"
-" mov %%l1, %%g1\n"
-" mov %%l2, %%g2\n"
-" ba,pt %%xcc, 2b\n"
-" restore %%l3, %%g0, %%g3\n"
-" .previous\n"
+ /* This atomically does:
+ * old_val = sem->count;
+ * new_val = sem->count - 1;
+ * sem->count = new_val;
+ * if (old_val < 1)
+ * __down(sem);
+ *
+ * The (old_val < 1) test is equivalent to
+ * the more straightforward (new_val < 0),
+ * but it is easier to test the former because
+ * of how the CAS instruction works.
+ */
+
+ __asm__ __volatile__("\n"
+" ! down sem(%0)\n"
+"1: lduw [%0], %%g5\n"
+" sub %%g5, 1, %%g7\n"
+" cas [%0], %%g5, %%g7\n"
+" cmp %%g5, %%g7\n"
+" bne,pn %%icc, 1b\n"
+" cmp %%g7, 1\n"
+" bl,pn %%icc, 3f\n"
+" membar #StoreStore\n"
+"2:\n"
+" .subsection 2\n"
+"3: mov %0, %%g5\n"
+" save %%sp, -160, %%sp\n"
+" mov %%g1, %%l1\n"
+" mov %%g2, %%l2\n"
+" mov %%g3, %%l3\n"
+" call %1\n"
+" mov %%g5, %%o0\n"
+" mov %%l1, %%g1\n"
+" mov %%l2, %%g2\n"
+" ba,pt %%xcc, 2b\n"
+" restore %%l3, %%g0, %%g3\n"
+" .previous\n"
: : "r" (sem), "i" (__down)
: "g5", "g7", "memory", "cc");
}
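For readers who do not speak sparc64 assembly, a minimal C sketch of what the asm above does, assuming GCC's __sync_val_compare_and_swap builtin in place of the cas instruction and a hypothetical down_slow() in place of the out-of-line __down(); the shuffling through %l1-%l3 in the slow path has no C equivalent, it only preserves the global registers across the call:

        struct sem_sketch { int count; };

        static void down_slow(struct sem_sketch *sem)
        {
                /* Here the real code sleeps on sem->wait until woken. */
        }

        static void down_sketch(struct sem_sketch *sem)
        {
                int old_val, new_val;

                /* Retry the compare-and-swap until the decrement lands on
                 * an unmodified count, mirroring the lduw/cas/bne loop. */
                do {
                        old_val = sem->count;
                        new_val = old_val - 1;
                } while (__sync_val_compare_and_swap(&sem->count,
                                        old_val, new_val) != old_val);

                if (old_val < 1)        /* count went negative: contended */
                        down_slow(sem);
        }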
-extern __inline__ int down_interruptible(struct semaphore *sem)
+static __inline__ int down_interruptible(struct semaphore *sem)
{
int ret = 0;
-#if WAITQUEUE_DEBUG
- CHECK_MAGIC(sem->__magic);
-#endif
- __asm__ __volatile__(
-" 1: lduw [%2], %%g5\n"
-" sub %%g5, 1, %%g7\n"
-" cas [%2], %%g5, %%g7\n"
-" cmp %%g5, %%g7\n"
-" bne,pn %%icc, 1b\n"
-" cmp %%g7, 1\n"
-" bl,pn %%icc, 3f\n"
-" membar #StoreStore\n"
-" 2:\n"
-" .subsection 2\n"
-" 3: mov %2, %%g5\n"
-" save %%sp, -160, %%sp\n"
-" mov %%g1, %%l1\n"
-" mov %%g2, %%l2\n"
-" mov %%g3, %%l3\n"
-" call %3\n"
-" mov %%g5, %%o0\n"
-" mov %%l1, %%g1\n"
-" mov %%l2, %%g2\n"
-" mov %%l3, %%g3\n"
-" ba,pt %%xcc, 2b\n"
-" restore %%o0, %%g0, %0\n"
-" .previous\n"
+ /* This atomically does:
+ * old_val = sem->count;
+ * new_val = sem->count - 1;
+ * sem->count = new_val;
+ * if (old_val < 1)
+ * ret = __down_interruptible(sem);
+ *
+ * The (old_val < 1) test is equivalent to
+ * the more straightforward (new_val < 0),
+ * but it is easier to test the former because
+ * of how the CAS instruction works.
+ */
+
+ __asm__ __volatile__("\n"
+" ! down_interruptible sem(%2) ret(%0)\n"
+"1: lduw [%2], %%g5\n"
+" sub %%g5, 1, %%g7\n"
+" cas [%2], %%g5, %%g7\n"
+" cmp %%g5, %%g7\n"
+" bne,pn %%icc, 1b\n"
+" cmp %%g7, 1\n"
+" bl,pn %%icc, 3f\n"
+" membar #StoreStore\n"
+"2:\n"
+" .subsection 2\n"
+"3: mov %2, %%g5\n"
+" save %%sp, -160, %%sp\n"
+" mov %%g1, %%l1\n"
+" mov %%g2, %%l2\n"
+" mov %%g3, %%l3\n"
+" call %3\n"
+" mov %%g5, %%o0\n"
+" mov %%l1, %%g1\n"
+" mov %%l2, %%g2\n"
+" mov %%l3, %%g3\n"
+" ba,pt %%xcc, 2b\n"
+" restore %%o0, %%g0, %0\n"
+" .previous\n"
: "=r" (ret)
: "0" (ret), "r" (sem), "i" (__down_interruptible)
: "g5", "g7", "memory", "cc");
return ret;
}
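down_interruptible() differs from down() only in calling __down_interruptible() and returning its result, so the CAS sketch is not repeated; instead, the conventional caller pattern, with my_static_sem again an illustrative name:

        /* A signal delivered while sleeping makes down_interruptible()
         * return nonzero; callers normally back out with -ERESTARTSYS. */
        if (down_interruptible(&my_static_sem))
                return -ERESTARTSYS;
        /* ... critical section ... */
        up(&my_static_sem);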
-extern inline int down_trylock(struct semaphore *sem)
+static __inline__ int down_trylock(struct semaphore *sem)
{
- int ret = 0;
-#if WAITQUEUE_DEBUG
- CHECK_MAGIC(sem->__magic);
-#endif
- __asm__ __volatile__(
-" 1: lduw [%2], %%g5\n"
-" sub %%g5, 1, %%g7\n"
-" cas [%2], %%g5, %%g7\n"
-" cmp %%g5, %%g7\n"
-" bne,pn %%icc, 1b\n"
-" cmp %%g7, 1\n"
-" bl,pn %%icc, 3f\n"
-" membar #StoreStore\n"
-" 2:\n"
-" .subsection 2\n"
-" 3: mov %2, %%g5\n"
-" save %%sp, -160, %%sp\n"
-" mov %%g1, %%l1\n"
-" mov %%g2, %%l2\n"
-" mov %%g3, %%l3\n"
-" call %3\n"
-" mov %%g5, %%o0\n"
-" mov %%l1, %%g1\n"
-" mov %%l2, %%g2\n"
-" mov %%l3, %%g3\n"
-" ba,pt %%xcc, 2b\n"
-" restore %%o0, %%g0, %0\n"
-" .previous\n"
- : "=r" (ret)
- : "0" (ret), "r" (sem), "i" (__down_trylock)
+ int ret;
+
+ /* This atomically does:
+ * old_val = sem->count;
+ * new_val = sem->count - 1;
+ * if (old_val < 1) {
+ * ret = 1;
+ * } else {
+ * sem->count = new_val;
+ * ret = 0;
+ * }
+ *
+ * The (old_val < 1) test is equivalent to
+ * the more straightforward (new_val < 0),
+ * but it is easier to test the former because
+ * of how the CAS instruction works.
+ */
+
+ __asm__ __volatile__("\n"
+" ! down_trylock sem(%1) ret(%0)\n"
+"1: lduw [%1], %%g5\n"
+" sub %%g5, 1, %%g7\n"
+" cmp %%g5, 1\n"
+" bl,pn %%icc, 2f\n"
+" mov 1, %0\n"
+" cas [%1], %%g5, %%g7\n"
+" cmp %%g5, %%g7\n"
+" bne,pn %%icc, 1b\n"
+" mov 0, %0\n"
+" membar #StoreStore\n"
+"2:\n"
+ : "=&r" (ret)
+ : "r" (sem)
: "g5", "g7", "memory", "cc");
+
return ret;
}
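In the same hedged C as the down() sketch, the trylock path; note the difference the comment calls out: a contended count is never stored back, so a failing trylock leaves the semaphore untouched:

        static int down_trylock_sketch(struct sem_sketch *sem)
        {
                int old_val;

                do {
                        old_val = sem->count;
                        if (old_val < 1)
                                return 1;  /* would block: fail, no store */
                } while (__sync_val_compare_and_swap(&sem->count, old_val,
                                        old_val - 1) != old_val);

                return 0;                  /* acquired */
        }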
-extern __inline__ void up(struct semaphore * sem)
+static __inline__ void up(struct semaphore * sem)
{
-#if WAITQUEUE_DEBUG
- CHECK_MAGIC(sem->__magic);
-#endif
- __asm__ __volatile__(
-" membar #StoreLoad | #LoadLoad\n"
-" 1: lduw [%0], %%g5\n"
-" add %%g5, 1, %%g7\n"
-" cas [%0], %%g5, %%g7\n"
-" cmp %%g5, %%g7\n"
-" bne,pn %%icc, 1b\n"
-" addcc %%g7, 1, %%g0\n"
-" ble,pn %%icc, 3f\n"
-" nop\n"
-" 2:\n"
-" .subsection 2\n"
-" 3: mov %0, %%g5\n"
-" save %%sp, -160, %%sp\n"
-" mov %%g1, %%l1\n"
-" mov %%g2, %%l2\n"
-" mov %%g3, %%l3\n"
-" call %1\n"
-" mov %%g5, %%o0\n"
-" mov %%l1, %%g1\n"
-" mov %%l2, %%g2\n"
-" ba,pt %%xcc, 2b\n"
-" restore %%l3, %%g0, %%g3\n"
-" .previous\n"
+ /* This atomically does:
+ * old_val = sem->count;
+ * new_val = sem->count + 1;
+ * sem->count = new_val;
+ * if (old_val < 0)
+ * __up(sem);
+ *
+ * The (old_val < 0) test is equivalent to
+ * the more straightforward (new_val <= 0),
+ * but it is easier to test the former because
+ * of how the CAS instruction works.
+ */
+
+ __asm__ __volatile__("\n"
+" ! up sem(%0)\n"
+" membar #StoreLoad | #LoadLoad\n"
+"1: lduw [%0], %%g5\n"
+" add %%g5, 1, %%g7\n"
+" cas [%0], %%g5, %%g7\n"
+" cmp %%g5, %%g7\n"
+" bne,pn %%icc, 1b\n"
+" addcc %%g7, 1, %%g0\n"
+" ble,pn %%icc, 3f\n"
+" nop\n"
+"2:\n"
+" .subsection 2\n"
+"3: mov %0, %%g5\n"
+" save %%sp, -160, %%sp\n"
+" mov %%g1, %%l1\n"
+" mov %%g2, %%l2\n"
+" mov %%g3, %%l3\n"
+" call %1\n"
+" mov %%g5, %%o0\n"
+" mov %%l1, %%g1\n"
+" mov %%l2, %%g2\n"
+" ba,pt %%xcc, 2b\n"
+" restore %%l3, %%g0, %%g3\n"
+" .previous\n"
: : "r" (sem), "i" (__up)
: "g5", "g7", "memory", "cc");
}
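And the release side, again as a hedged sketch with a hypothetical up_slow() standing in for __up(); the leading membar in the asm has no separate line here because the __sync builtin already implies a full barrier:

        static void up_slow(struct sem_sketch *sem)
        {
                /* Here the real code wakes a sleeper on sem->wait. */
        }

        static void up_sketch(struct sem_sketch *sem)
        {
                int old_val;

                do {
                        old_val = sem->count;
                } while (__sync_val_compare_and_swap(&sem->count, old_val,
                                        old_val + 1) != old_val);

                if (old_val < 0)        /* sleepers exist: slow path wakes */
                        up_slow(sem);
        }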