patch-2.1.127 linux/include/asm-sparc/softirq.h

diff -u --recursive --new-file v2.1.126/linux/include/asm-sparc/softirq.h linux/include/asm-sparc/softirq.h
@@ -1,133 +1,173 @@
 /* softirq.h: 32-bit Sparc soft IRQ support.
  *
  * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1998 Anton Blanchard (anton@progsoc.uts.edu.au)
  */
 
 #ifndef __SPARC_SOFTIRQ_H
 #define __SPARC_SOFTIRQ_H
 
 #include <asm/atomic.h>
+#include <asm/smp.h>
 #include <asm/hardirq.h>
 
-/* The locking mechanism for base handlers, to prevent re-entrancy,
- * is entirely private to an implementation, it should not be
- * referenced at all outside of this file.
- */
+extern unsigned int local_bh_count[NR_CPUS];
+
 #define get_active_bhs()	(bh_mask & bh_active)
 
 #ifdef __SMP__
 
-extern atomic_t __sparc_bh_counter;
-
-#define start_bh_atomic() \
-	do { atomic_inc(&__sparc_bh_counter); synchronize_irq(); } while(0)
-
-#define end_bh_atomic()		atomic_dec(&__sparc_bh_counter)
-
-#include <asm/spinlock.h>
-
-extern spinlock_t global_bh_lock;
-
-#define init_bh(nr, routine)				\
-do {	unsigned long flags;				\
-	int ent = nr;					\
-	spin_lock_irqsave(&global_bh_lock, flags);	\
-	bh_base[ent] = routine;				\
-	bh_mask_count[ent] = 0;				\
-	bh_mask |= 1 << ent;				\
-	spin_unlock_irqrestore(&global_bh_lock, flags);	\
-} while(0)
-
-#define remove_bh(nr)					\
-do {	unsigned long flags;				\
-	int ent = nr;					\
-	spin_lock_irqsave(&global_bh_lock, flags);	\
-	bh_base[ent] = NULL;				\
-	bh_mask &= ~(1 << ent);				\
-	spin_unlock_irqrestore(&global_bh_lock, flags);	\
-} while(0)
-
-#define mark_bh(nr)					\
-do {	unsigned long flags;				\
-	spin_lock_irqsave(&global_bh_lock, flags);	\
-	bh_active |= (1 << nr);				\
-	spin_unlock_irqrestore(&global_bh_lock, flags);	\
-} while(0)
-
-#define disable_bh(nr)					\
-do {	unsigned long flags;				\
-	int ent = nr;					\
-	spin_lock_irqsave(&global_bh_lock, flags);	\
-	bh_mask &= ~(1 << ent);				\
-	bh_mask_count[ent]++;				\
-	spin_unlock_irqrestore(&global_bh_lock, flags);	\
-} while(0)
-
-#define enable_bh(nr)					\
-do {	unsigned long flags;				\
-	int ent = nr;					\
-	spin_lock_irqsave(&global_bh_lock, flags);	\
-	if (!--bh_mask_count[ent])			\
-		bh_mask |= 1 << ent;			\
-	spin_unlock_irqrestore(&global_bh_lock, flags);	\
-} while(0)
-
-#define softirq_trylock(cpu)					\
-({								\
-	int ret = 1;						\
-	if(atomic_add_return(1, &__sparc_bh_counter) != 1) {	\
-		atomic_dec(&__sparc_bh_counter);		\
-		ret = 0;					\
-	}							\
-	ret;							\
-})
-#define softirq_endlock(cpu)	atomic_dec(&__sparc_bh_counter)
-#define clear_active_bhs(mask)				\
-do {	unsigned long flags;				\
-	spin_lock_irqsave(&global_bh_lock, flags);	\
-	bh_active &= ~(mask);				\
-	spin_unlock_irqrestore(&global_bh_lock, flags);	\
-} while(0)
-
-#else /* !(__SMP__) */
-
-extern int __sparc_bh_counter;
+/*
+ * The locking mechanism for base handlers, to prevent re-entrancy,
+ * is entirely private to an implementation, it should not be
+ * referenced at all outside of this file.
+ */
+extern atomic_t global_bh_lock;
+extern atomic_t global_bh_count;
+extern spinlock_t sparc_bh_lock;
+
+extern void synchronize_bh(void);
+
+static inline void clear_active_bhs(unsigned int mask)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&sparc_bh_lock, flags);
+	bh_active &= ~(mask);
+	spin_unlock_irqrestore(&sparc_bh_lock, flags);
+}
+
+extern inline void init_bh(int nr, void (*routine)(void))
+{
+	unsigned long flags;
+	spin_lock_irqsave(&sparc_bh_lock, flags);
+	bh_base[nr] = routine;
+	bh_mask_count[nr] = 0;
+	bh_mask |= 1 << nr;
+	spin_unlock_irqrestore(&sparc_bh_lock, flags);
+}
+
+extern inline void remove_bh(int nr)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&sparc_bh_lock, flags);
+	bh_base[nr] = NULL;
+	bh_mask &= ~(1 << nr);
+	spin_unlock_irqrestore(&sparc_bh_lock, flags);
+}
+
+extern inline void mark_bh(int nr)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&sparc_bh_lock, flags);
+	bh_active |= (1 << nr);
+	spin_unlock_irqrestore(&sparc_bh_lock, flags);
+}
+
+/*
+ * These use a mask count to correctly handle
+ * nested disable/enable calls
+ */
+extern inline void disable_bh(int nr)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&sparc_bh_lock, flags);
+	bh_mask &= ~(1 << nr);
+	bh_mask_count[nr]++;
+	spin_unlock_irqrestore(&sparc_bh_lock, flags);
+	synchronize_bh();
+}
+
+extern inline void enable_bh(int nr)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&sparc_bh_lock, flags);
+	if (!--bh_mask_count[nr])
+		bh_mask |= 1 << nr;
+	spin_unlock_irqrestore(&sparc_bh_lock, flags);
+}
+
+static inline void start_bh_atomic(void)
+{
+	atomic_inc(&global_bh_lock);
+	synchronize_bh();
+}
+
+static inline void end_bh_atomic(void)
+{
+	atomic_dec(&global_bh_lock);
+}
+
+/* These are for the IRQs testing the lock */
+static inline int softirq_trylock(int cpu)
+{
+	if (atomic_add_return(1, &global_bh_count) == 1) {
+		if (atomic_read(&global_bh_lock) == 0) {
+			++local_bh_count[cpu];
+			return 1;
+		}
+	}
+	atomic_dec(&global_bh_count);
+	return 0;
+}
+
+static inline void softirq_endlock(int cpu)
+{
+	local_bh_count[cpu]--;
+	atomic_dec(&global_bh_count);
+}
 
-#define start_bh_atomic()	do { __sparc_bh_counter++; barrier(); } while(0)
-#define end_bh_atomic()		do { barrier(); __sparc_bh_counter--; } while(0)
+#else
 
-#define softirq_trylock(cpu) (__sparc_bh_counter ? 0 : (__sparc_bh_counter=1))
-#define softirq_endlock(cpu) (__sparc_bh_counter = 0)
 #define clear_active_bhs(x)	(bh_active &= ~(x))
-#define synchronize_bh()	barrier() /* XXX implement SMP version -DaveM */
-
-#define init_bh(nr, routine)	\
-do {	int ent = nr;		\
-	bh_base[ent] = routine;	\
-	bh_mask_count[ent] = 0;	\
-	bh_mask |= 1 << ent;	\
-} while(0)
-
-#define remove_bh(nr)		\
-do {	int ent = nr;		\
-	bh_base[ent] = NULL;	\
-	bh_mask &= ~(1 << ent);	\
-} while(0)
-
 #define mark_bh(nr)		(bh_active |= (1 << (nr)))
 
-#define disable_bh(nr)		\
-do {	int ent = nr;		\
-	bh_mask &= ~(1 << ent);	\
-	bh_mask_count[ent]++;	\
-} while(0)
-
-#define enable_bh(nr)			\
-do {	int ent = nr;			\
-	if (!--bh_mask_count[ent])	\
-		bh_mask |= 1 << ent;	\
-} while(0)
+/* These are for the irq's testing the lock */
+#define softirq_trylock(cpu)	(local_bh_count[cpu] ? 0 : (local_bh_count[cpu]=1))
+#define softirq_endlock(cpu)	(local_bh_count[cpu] = 0)
+#define synchronize_bh()	barrier()
+
+/*
+ * These use a mask count to correctly handle
+ * nested disable/enable calls
+ */
+extern inline void disable_bh(int nr)
+{
+	bh_mask &= ~(1 << nr);
+	bh_mask_count[nr]++;
+	synchronize_bh();
+}
+
+extern inline void enable_bh(int nr)
+{
+	if (!--bh_mask_count[nr])
+		bh_mask |= 1 << nr;
+}
+
+extern inline void init_bh(int nr, void (*routine)(void))
+{
+	bh_base[nr] = routine;
+	bh_mask_count[nr] = 0;
+	bh_mask |= 1 << nr;
+}
+
+extern inline void remove_bh(int nr)
+{
+	bh_base[nr] = NULL;
+	bh_mask &= ~(1 << nr);
+}
+
+extern inline void start_bh_atomic(void)
+{
+	local_bh_count[0]++;
+	barrier();
+}
+
+extern inline void end_bh_atomic(void)
+{
+	barrier();
+	local_bh_count[0]--;
+}
 
-#endif /* __SMP__ */
+#endif	/* SMP */
 
-#endif /* __SPARC_SOFTIRQ_H */
+#endif	/* __SPARC_SOFTIRQ_H */
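
For context: these SMP primitives are consumed by the generic
bottom-half dispatcher on the IRQ-exit path. The fragment below is a
simplified, hypothetical reconstruction of that loop (modelled on the
2.1.x kernel/softirq.c; bh_base and the bh bitmasks are declared in
<linux/interrupt.h>), not the literal kernel code:

	/* Hypothetical sketch of running pending bottom halves on IRQ exit. */
	static void run_pending_bhs(int cpu)
	{
		unsigned int active, nr;

		/* Only one CPU may run bottom halves at a time; back off
		 * if another CPU got there first, or if some code path is
		 * inside a start_bh_atomic() region (global_bh_lock != 0). */
		if (!softirq_trylock(cpu))
			return;

		while ((active = get_active_bhs()) != 0) {
			clear_active_bhs(active);	/* claim this batch */
			for (nr = 0; active; active >>= 1, nr++)
				if ((active & 1) && bh_base[nr])
					bh_base[nr]();	/* run the handler */
		}
		softirq_endlock(cpu);
	}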

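The "mask count" comment deserves spelling out: disable_bh() and
enable_bh() nest, and a bottom half becomes runnable again only when
every disable has been paired with an enable. A hypothetical call
sequence, using TIMER_BH from <linux/interrupt.h>:

	disable_bh(TIMER_BH);	/* count = 1, mask bit cleared; on SMP,
				 * synchronize_bh() waits out any handler
				 * still running before returning */
	disable_bh(TIMER_BH);	/* count = 2, still masked */
	enable_bh(TIMER_BH);	/* count = 1, still masked */
	enable_bh(TIMER_BH);	/* count = 0, mask bit set: may run again */
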
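Similarly, start_bh_atomic()/end_bh_atomic() let ordinary kernel code
exclude all bottom halves at once: raising global_bh_lock makes
softirq_trylock() fail on every CPU, and synchronize_bh() waits until
no CPU is still inside a handler. A hypothetical fragment protecting
state shared with a bh handler (shared_state_update() is a made-up
helper, standing in for whatever the caller needs to do):

	start_bh_atomic();	/* no new bh runs; in-flight ones drain */
	shared_state_update();	/* touch data a bh handler also uses */
	end_bh_atomic();	/* bottom halves may run again */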