From: Anton Blanchard <anton@samba.org>

Minimal infrastructure to permit sleep-in-spinlock debug checking on
architectures which do not support preemption.

With CONFIG_DEBUG_SPINLOCK_SLEEP set, preempt_disable()/preempt_enable()
maintain the preempt count even on non-preemptible kernels, so in_atomic()
can detect code which sleeps with a spinlock held.  preempt_check_resched()
stays dependent on CONFIG_PREEMPT alone and compiles away to a no-op, so
the count is used purely for the debug check and no preemption ever occurs.
The ppc64 in_atomic()/IRQ_EXIT_OFFSET definitions and the scheduler's
initial preempt_count setup are switched to the combined condition to match.
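
The checks themselves live in the might_sleep() machinery; this patch only
makes the preempt count bookkeeping available so that in_atomic() returns
something meaningful.  A minimal sketch of the consumer, in the spirit of
kernel/sched.c's __might_sleep() (illustrative only, not part of this diff):

	/*
	 * Complain if called from non-sleepable context, i.e. while the
	 * preempt count shows a lock held or with interrupts disabled.
	 */
	void __might_sleep(char *file, int line)
	{
		if (in_atomic() || irqs_disabled()) {
			printk(KERN_ERR "Debug: sleeping function called "
			       "from invalid context at %s:%d\n", file, line);
			dump_stack();
		}
	}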

---

 arch/ppc64/Kconfig          |    7 +++++++
 include/asm-ppc64/hardirq.h |    2 +-
 include/linux/preempt.h     |   17 +++++++++++------
 kernel/sched.c              |    4 ++--
 4 files changed, 21 insertions(+), 9 deletions(-)

diff -puN arch/ppc64/Kconfig~ppc64-spinlock-sleep-debugging arch/ppc64/Kconfig
--- 25/arch/ppc64/Kconfig~ppc64-spinlock-sleep-debugging	2004-02-11 21:15:05.000000000 -0800
+++ 25-akpm/arch/ppc64/Kconfig	2004-02-11 21:15:05.000000000 -0800
@@ -357,6 +357,13 @@ config DEBUG_INFO
 	  debugging info resulting in a larger kernel image.
 	  Say Y here only if you plan to use gdb to debug the kernel.
 	  If you don't debug the kernel, you can say N.
+
+config DEBUG_SPINLOCK_SLEEP
+	bool "Sleep-inside-spinlock checking"
+	depends on DEBUG_KERNEL
+	help
+	  If you say Y here, various routines which may sleep will become very
+	  noisy if they are called with a spinlock held.
 	  
 endmenu
 
diff -puN include/asm-ppc64/hardirq.h~ppc64-spinlock-sleep-debugging include/asm-ppc64/hardirq.h
--- 25/include/asm-ppc64/hardirq.h~ppc64-spinlock-sleep-debugging	2004-02-11 21:15:05.000000000 -0800
+++ 25-akpm/include/asm-ppc64/hardirq.h	2004-02-11 21:15:05.000000000 -0800
@@ -80,7 +80,7 @@ typedef struct {
 
 #define irq_enter()		(preempt_count() += HARDIRQ_OFFSET)
 
-#ifdef CONFIG_PREEMPT
+#if defined(CONFIG_PREEMPT) || defined(CONFIG_DEBUG_SPINLOCK_SLEEP)
 # define in_atomic()	((preempt_count() & ~PREEMPT_ACTIVE) != kernel_locked())
 # define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)
 #else
diff -puN include/linux/preempt.h~ppc64-spinlock-sleep-debugging include/linux/preempt.h
--- 25/include/linux/preempt.h~ppc64-spinlock-sleep-debugging	2004-02-11 21:15:05.000000000 -0800
+++ 25-akpm/include/linux/preempt.h	2004-02-11 21:15:05.000000000 -0800
@@ -24,6 +24,17 @@ do { \
 
 extern void preempt_schedule(void);
 
+#define preempt_check_resched() \
+do { \
+	if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
+		preempt_schedule(); \
+} while (0)
+#else
+#define preempt_check_resched()		do { } while (0)
+#endif
+
+#if defined(CONFIG_PREEMPT) || defined(CONFIG_DEBUG_SPINLOCK_SLEEP)
+
 #define preempt_disable() \
 do { \
 	inc_preempt_count(); \
@@ -36,12 +47,6 @@ do { \
 	dec_preempt_count(); \
 } while (0)
 
-#define preempt_check_resched() \
-do { \
-	if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
-		preempt_schedule(); \
-} while (0)
-
 #define preempt_enable() \
 do { \
 	preempt_enable_no_resched(); \
diff -puN kernel/sched.c~ppc64-spinlock-sleep-debugging kernel/sched.c
--- 25/kernel/sched.c~ppc64-spinlock-sleep-debugging	2004-02-11 21:15:05.000000000 -0800
+++ 25-akpm/kernel/sched.c	2004-02-11 21:15:05.000000000 -0800
@@ -728,7 +728,7 @@ void sched_fork(task_t *p)
 	INIT_LIST_HEAD(&p->run_list);
 	p->array = NULL;
 	spin_lock_init(&p->switch_lock);
-#ifdef CONFIG_PREEMPT
+#if defined(CONFIG_PREEMPT) || defined(CONFIG_DEBUG_SPINLOCK_SLEEP)
 	/*
 	 * During context-switch we hold precisely one spinlock, which
 	 * schedule_tail drops. (in the common case it's this_rq()->lock,
@@ -2669,7 +2669,7 @@ void __init init_idle(task_t *idle, int 
 	local_irq_restore(flags);
 
 	/* Set the preempt count _outside_ the spinlocks! */
-#ifdef CONFIG_PREEMPT
+#if defined(CONFIG_PREEMPT) || defined(CONFIG_DEBUG_SPINLOCK_SLEEP)
 	idle->thread_info->preempt_count = (idle->lock_depth >= 0);
 #else
 	idle->thread_info->preempt_count = 0;

_