From: Ray Bryant <raybry@sgi.com>

There is a problem with in_lock_functions() not being declared in
kernel/lockmeter.c and some confusion about whether or not (and how) to
declare __lock_text_start/end in linux/spinlock.h.  Here is a patch to fix
this.  It works against 2.6.9-rc2, but since it only modifies lockmeter.c it
should work against mm6 as well.  I've tested it (without preempt) on Altix
and on i386.
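
For reference, the prototype that callers expect to have visible is just
the following (roughly what linux/spinlock.h provides; shown here for
illustration only, not part of this patch):

	int in_lock_functions(unsigned long addr);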

(__lock_text_start/end are declared in linux/spinlock.h as unsigned long,
but in in_lock_functions() as char [], at least in some snapshots of Linus'
tree.  There should be no declaration in linux/spinlock.h as I read it.)
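
For illustration only, the two conflicting forms mentioned above look
roughly like this (a sketch, not part of this patch):

	extern unsigned long __lock_text_start, __lock_text_end;  /* linux/spinlock.h snapshot   */
	extern char __lock_text_start[], __lock_text_end[];       /* inside in_lock_functions()  */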

This is an incremental patch over the lockmeter fix for the COOL bits.

Signed-off-by: Andrew Morton <akpm@osdl.org>
---

 25-akpm/kernel/lockmeter.c |   54 +++++++++++++++++++++++++++++++++++++++++++--
 1 files changed, 52 insertions(+), 2 deletions(-)

diff -puN kernel/lockmeter.c~lockmeter-lockmeter-fixes-for-preempt-case kernel/lockmeter.c
--- 25/kernel/lockmeter.c~lockmeter-lockmeter-fixes-for-preempt-case	Thu Sep 16 14:53:31 2004
+++ 25-akpm/kernel/lockmeter.c	Thu Sep 16 14:53:31 2004
@@ -1236,18 +1236,58 @@ int __lockfunc _write_trylock(rwlock_t *
 EXPORT_SYMBOL(_write_trylock);
 
 #if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT)
+/*
+ * This could be a long-held lock.  If another CPU holds it for a long time,
+ * and that CPU is not asked to reschedule then *this* CPU will spin on the
+ * lock for a long time, even if *this* CPU is asked to reschedule.
+ *
+ * So what we do here, in the slow (contended) path is to spin on the lock by
+ * hand while permitting preemption.
+ *
+ * Called inside preempt_disable().
+ */
+static inline void __preempt_spin_lock(spinlock_t *lock, void *caller_pc)
+{
+	if (preempt_count() > 1) {
+		_metered_spin_lock(lock, caller_pc);
+		return;
+	}
+
+	do {
+		preempt_enable();
+		while (spin_is_locked(lock))
+			cpu_relax();
+		preempt_disable();
+	} while (!_metered_spin_trylock(lock, caller_pc));
+}
+
 void __lockfunc _spin_lock(spinlock_t *lock)
 {
 	preempt_disable();
 	if (unlikely(!_metered_spin_trylock(lock, __builtin_return_address(0))))
-		__preempt_spin_lock(lock);
+		__preempt_spin_lock(lock, __builtin_return_address(0));
+}
+
+static inline void __preempt_write_lock(rwlock_t *lock, void *caller_pc)
+{
+	if (preempt_count() > 1) {
+		_metered_write_lock(lock, caller_pc);
+		return;
+	}
+
+	do {
+		preempt_enable();
+		while (rwlock_is_locked(lock))
+			cpu_relax();
+		preempt_disable();
+	} while (!_metered_write_trylock(lock, caller_pc));
 }
 
 void __lockfunc _write_lock(rwlock_t *lock)
 {
 	preempt_disable();
 	if (unlikely(!_metered_write_trylock(lock, __builtin_return_address(0))))
-		__preempt_write_lock(lock);
+		__preempt_write_lock(lock, __builtin_return_address(0));
 }
 #else
 void __lockfunc _spin_lock(spinlock_t *lock)
@@ -1458,3 +1498,13 @@ int __lockfunc _spin_trylock_bh(spinlock
 	return 0;
 }
 EXPORT_SYMBOL(_spin_trylock_bh);
+
+int in_lock_functions(unsigned long addr)
+{
+	/* Linker adds these: start and end of __lockfunc functions */
+	extern char __lock_text_start[], __lock_text_end[];
+
+	return addr >= (unsigned long)__lock_text_start
+	&& addr < (unsigned long)__lock_text_end;
+}
+EXPORT_SYMBOL(in_lock_functions);
_