patch-2.1.27 linux/kernel/softirq.c

diff -u --recursive --new-file v2.1.26/linux/kernel/softirq.c linux/kernel/softirq.c
@@ -22,8 +22,9 @@
 #include <asm/io.h>
 #include <asm/irq.h>
 #include <asm/bitops.h>
+#include <asm/atomic.h>
 
-unsigned long intr_count = 0;
+atomic_t intr_count = 0;
 
 int bh_mask_count[32];
 unsigned long bh_active = 0;
@@ -31,32 +32,52 @@
 void (*bh_base[32])(void);
 
 
-asmlinkage void do_bottom_half(void)
+/*
+ * This needs to make sure that only one bottom half handler
+ * is ever active at a time. We do this without locking by
+ * doing an atomic increment on the intr_count, and checking
+ * (non-atomically) against 1. Only if it's 1 do we schedule
+ * the bottom half.
+ *
+ * Note that the non-atomicity of the test (as opposed to the
+ * actual update) means that the test may fail, and _nobody_
+ * runs the handlers if there is a race that makes multiple
+ * CPUs get here at the same time. That's OK; we'll run them
+ * next time around.
+ */
+static inline void run_bottom_halves(void)
 {
 	unsigned long active;
 	unsigned long mask, left;
 	void (**bh)(void);
 
-	lock_kernel();
-	intr_count=1;
+	cli();
+	active = bh_active & bh_mask;
+	bh_active &= ~active;
 	sti();
 	bh = bh_base;
-	active = bh_active & bh_mask;
 	for (mask = 1, left = ~0 ; left & active ; bh++,mask += mask,left += left) {
 		if (mask & active) {
-			void (*fn)(void);
-			bh_active &= ~mask;
-			fn = *bh;
-			if (!fn)
-				goto bad_bh;
-			fn();
+			(*bh)();
 		}
 	}
-	goto out;
-bad_bh:
-	printk ("irq.c:bad bottom half entry %08lx\n", mask);
-out:
-	intr_count=0;
+}
+
+/*
+ * We really shouldn't need to take the kernel lock here,
+ * but we do it the easy way for now (the scheduler gets
+ * upset if somebody messes with intr_count without holding
+ * the kernel lock).
+ *
+ * We can get rid of the kernel lock here once interrupt
+ * handling has been made sane.
+ */
+asmlinkage void do_bottom_half(void)
+{
+	lock_kernel();
+	atomic_inc(&intr_count);
+	if (intr_count == 1)
+		run_bottom_halves();
+	atomic_dec(&intr_count);
 	unlock_kernel();
-	return;
 }
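
The mutual-exclusion trick described in the comment above is easy to
demonstrate outside the kernel. Here is a minimal user-space sketch of
the same pattern, using C11 <stdatomic.h> in place of the kernel's
<asm/atomic.h>; the names mirror the patch, but the code is purely
illustrative and not part of it.

	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_int intr_count;

	static void run_bottom_halves(void)
	{
		puts("running bottom halves");	/* real handler work goes here */
	}

	static void do_bottom_half(void)
	{
		/*
		 * Atomic increment, then a plain test against 1: only a
		 * caller that raised the count from 0 to 1 runs the
		 * handlers.  If two callers race, both may observe 2 and
		 * neither runs them -- the pending work is simply picked
		 * up on the next invocation, as the patch comment notes.
		 */
		atomic_fetch_add(&intr_count, 1);
		if (atomic_load(&intr_count) == 1)
			run_bottom_halves();
		atomic_fetch_sub(&intr_count, 1);
	}

	int main(void)
	{
		do_bottom_half();
		return 0;
	}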
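
The handler loop in run_bottom_halves() is also worth a note. The
expression "mask += mask" shifts the current bit left one position each
iteration, while "left", which starts as all ones, shifts a zero in
from the bottom, so "left & active" drops to zero as soon as the
highest active bit has been passed. A standalone sketch of that walk
(the handler names are made up for illustration):

	#include <stdio.h>

	static void bh_one(void)   { puts("bottom half 1"); }
	static void bh_three(void) { puts("bottom half 3"); }

	static void (*bh_base[32])(void);

	static void run_active(unsigned long active)
	{
		void (**bh)(void) = bh_base;
		unsigned long mask, left;

		for (mask = 1, left = ~0UL; left & active; bh++, mask += mask, left += left) {
			if (mask & active)
				(*bh)();	/* run the handler whose bit is set */
		}
	}

	int main(void)
	{
		bh_base[1] = bh_one;
		bh_base[3] = bh_three;
		run_active((1UL << 1) | (1UL << 3));	/* prints "bottom half 1", then "bottom half 3" */
		return 0;
	}

In the kernel itself the active set is snapshotted and cleared from
bh_active with interrupts disabled (the cli()/sti() pair), so any bits
an interrupt handler sets during the walk survive for the next pass;
the sketch simply takes the set as an argument.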
