From: Rik van Riel <riel@redhat.com>

Add sem_is_read/write_locked functions to the read/write semaphores, along the
same lines as the *_is_locked spinlock functions.  The swap token tuning patch
uses sem_is_read_locked; sem_is_write_locked is added for completeness.

Signed-off-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
---

 include/asm-alpha/rwsem.h      |   10 ++++++++++
 include/asm-i386/rwsem.h       |   10 ++++++++++
 include/asm-ia64/rwsem.h       |   10 ++++++++++
 include/asm-ppc/rwsem.h        |   10 ++++++++++
 include/asm-ppc64/rwsem.h      |   10 ++++++++++
 include/asm-s390/rwsem.h       |   10 ++++++++++
 include/asm-sh/rwsem.h         |   10 ++++++++++
 include/asm-sparc64/rwsem.h    |   10 ++++++++++
 include/asm-x86_64/rwsem.h     |   10 ++++++++++
 include/linux/rwsem-spinlock.h |   10 ++++++++++
 10 files changed, 100 insertions(+)

diff -puN include/asm-alpha/rwsem.h~add-sem_is_read-write_locked include/asm-alpha/rwsem.h
--- 25/include/asm-alpha/rwsem.h~add-sem_is_read-write_locked	Tue Jun 28 15:42:30 2005
+++ 25-akpm/include/asm-alpha/rwsem.h	Tue Jun 28 15:42:30 2005
@@ -262,5 +262,15 @@ static inline long rwsem_atomic_update(l
 #endif
 }
 
+static inline int sem_is_read_locked(struct rw_semaphore *sem)
+{
+	return (sem->count > 0);
+}
+
+static inline int sem_is_write_locked(struct rw_semaphore *sem)
+{
+	return (sem->count < 0);
+}
+
 #endif /* __KERNEL__ */
 #endif /* _ALPHA_RWSEM_H */
diff -puN include/asm-i386/rwsem.h~add-sem_is_read-write_locked include/asm-i386/rwsem.h
--- 25/include/asm-i386/rwsem.h~add-sem_is_read-write_locked	Tue Jun 28 15:42:30 2005
+++ 25-akpm/include/asm-i386/rwsem.h	Tue Jun 28 15:42:30 2005
@@ -284,5 +284,15 @@ LOCK_PREFIX	"xadd %0,(%2)"
 	return tmp+delta;
 }
 
+static inline int sem_is_read_locked(struct rw_semaphore *sem)
+{
+	return (sem->count > 0);
+}
+
+static inline int sem_is_write_locked(struct rw_semaphore *sem)
+{
+	return (sem->count < 0);
+}
+
 #endif /* __KERNEL__ */
 #endif /* _I386_RWSEM_H */
diff -puN include/asm-ia64/rwsem.h~add-sem_is_read-write_locked include/asm-ia64/rwsem.h
--- 25/include/asm-ia64/rwsem.h~add-sem_is_read-write_locked	Tue Jun 28 15:42:30 2005
+++ 25-akpm/include/asm-ia64/rwsem.h	Tue Jun 28 15:42:30 2005
@@ -185,4 +185,14 @@ __downgrade_write (struct rw_semaphore *
 #define rwsem_atomic_add(delta, sem)	atomic_add(delta, (atomic_t *)(&(sem)->count))
 #define rwsem_atomic_update(delta, sem)	atomic_add_return(delta, (atomic_t *)(&(sem)->count))
 
+static inline int sem_is_read_locked(struct rw_semaphore *sem)
+{
+	return (sem->count > 0);
+}
+
+static inline int sem_is_write_locked(struct rw_semaphore *sem)
+{
+	return (sem->count < 0);
+}
+
 #endif /* _ASM_IA64_RWSEM_H */
diff -puN include/asm-ppc64/rwsem.h~add-sem_is_read-write_locked include/asm-ppc64/rwsem.h
--- 25/include/asm-ppc64/rwsem.h~add-sem_is_read-write_locked	Tue Jun 28 15:42:30 2005
+++ 25-akpm/include/asm-ppc64/rwsem.h	Tue Jun 28 15:42:30 2005
@@ -163,5 +163,15 @@ static inline int rwsem_atomic_update(in
 	return atomic_add_return(delta, (atomic_t *)(&sem->count));
 }
 
+static inline int sem_is_read_locked(struct rw_semaphore *sem)
+{
+	return (sem->count > 0);
+}
+
+static inline int sem_is_write_locked(struct rw_semaphore *sem)
+{
+	return (sem->count < 0);
+}
+
 #endif /* __KERNEL__ */
 #endif /* _PPC_RWSEM_XADD_H */
diff -puN include/asm-ppc/rwsem.h~add-sem_is_read-write_locked include/asm-ppc/rwsem.h
--- 25/include/asm-ppc/rwsem.h~add-sem_is_read-write_locked	Tue Jun 28 15:42:30 2005
+++ 25-akpm/include/asm-ppc/rwsem.h	Tue Jun 28 15:42:30 2005
@@ -168,5 +168,15 @@ static inline int rwsem_atomic_update(in
 	return atomic_add_return(delta, (atomic_t *)(&sem->count));
 }
 
+static inline int sem_is_read_locked(struct rw_semaphore *sem)
+{
+	return (sem->count > 0);
+}
+
+static inline int sem_is_write_locked(struct rw_semaphore *sem)
+{
+	return (sem->count < 0);
+}
+
 #endif /* __KERNEL__ */
 #endif /* _PPC_RWSEM_XADD_H */
diff -puN include/asm-s390/rwsem.h~add-sem_is_read-write_locked include/asm-s390/rwsem.h
--- 25/include/asm-s390/rwsem.h~add-sem_is_read-write_locked	Tue Jun 28 15:42:30 2005
+++ 25-akpm/include/asm-s390/rwsem.h	Tue Jun 28 15:42:30 2005
@@ -351,5 +351,15 @@ static inline long rwsem_atomic_update(l
 	return new;
 }
 
+static inline int sem_is_read_locked(struct rw_semaphore *sem)
+{
+	return (sem->count > 0);
+}
+
+static inline int sem_is_write_locked(struct rw_semaphore *sem)
+{
+	return (sem->count < 0);
+}
+
 #endif /* __KERNEL__ */
 #endif /* _S390_RWSEM_H */
diff -puN include/asm-sh/rwsem.h~add-sem_is_read-write_locked include/asm-sh/rwsem.h
--- 25/include/asm-sh/rwsem.h~add-sem_is_read-write_locked	Tue Jun 28 15:42:30 2005
+++ 25-akpm/include/asm-sh/rwsem.h	Tue Jun 28 15:42:30 2005
@@ -166,5 +166,15 @@ static inline int rwsem_atomic_update(in
 	return atomic_add_return(delta, (atomic_t *)(&sem->count));
 }
 
+static inline int sem_is_read_locked(struct rw_semaphore *sem)
+{
+	return (sem->count > 0);
+}
+
+static inline int sem_is_write_locked(struct rw_semaphore *sem)
+{
+	return (sem->count < 0);
+}
+
 #endif /* __KERNEL__ */
 #endif /* _ASM_SH_RWSEM_H */
diff -puN include/asm-sparc64/rwsem.h~add-sem_is_read-write_locked include/asm-sparc64/rwsem.h
--- 25/include/asm-sparc64/rwsem.h~add-sem_is_read-write_locked	Tue Jun 28 15:42:30 2005
+++ 25-akpm/include/asm-sparc64/rwsem.h	Tue Jun 28 15:42:30 2005
@@ -96,6 +96,16 @@ static __inline__ signed long rwsem_cmpx
 	return cmpxchg(&sem->count,old,new);
 }
 
+static inline int sem_is_read_locked(struct rw_semaphore *sem)
+{
+	return (sem->count > 0);
+}
+
+static inline int sem_is_write_locked(struct rw_semaphore *sem)
+{
+	return (sem->count < 0);
+}
+
 #endif /* __KERNEL__ */
 
 #endif /* _SPARC64_RWSEM_H */
diff -puN include/asm-x86_64/rwsem.h~add-sem_is_read-write_locked include/asm-x86_64/rwsem.h
--- 25/include/asm-x86_64/rwsem.h~add-sem_is_read-write_locked	Tue Jun 28 15:42:30 2005
+++ 25-akpm/include/asm-x86_64/rwsem.h	Tue Jun 28 15:42:30 2005
@@ -274,5 +274,15 @@ LOCK_PREFIX	"xaddl %0,(%2)"
 	return tmp+delta;
 }
 
+static inline int sem_is_read_locked(struct rw_semaphore *sem)
+{
+	return (sem->count > 0);
+}
+
+static inline int sem_is_write_locked(struct rw_semaphore *sem)
+{
+	return (sem->count < 0);
+}
+
 #endif /* __KERNEL__ */
 #endif /* _X8664_RWSEM_H */
diff -puN include/linux/rwsem-spinlock.h~add-sem_is_read-write_locked include/linux/rwsem-spinlock.h
--- 25/include/linux/rwsem-spinlock.h~add-sem_is_read-write_locked	Tue Jun 28 15:42:30 2005
+++ 25-akpm/include/linux/rwsem-spinlock.h	Tue Jun 28 15:42:30 2005
@@ -61,5 +61,15 @@ extern void FASTCALL(__up_read(struct rw
 extern void FASTCALL(__up_write(struct rw_semaphore *sem));
 extern void FASTCALL(__downgrade_write(struct rw_semaphore *sem));
 
+static inline int sem_is_read_locked(struct rw_semaphore *sem)
+{
+	return (sem->activity > 0);
+}
+
+static inline int sem_is_write_locked(struct rw_semaphore *sem)
+{
+	return (sem->activity < 0);
+}
+
 #endif /* __KERNEL__ */
 #endif /* _LINUX_RWSEM_SPINLOCK_H */
_