patch-2.1.42 linux/include/asm-sparc64/softirq.h

Next file: linux/include/asm-sparc64/spinlock.h
Previous file: linux/include/asm-sparc64/ptrace.h
Back to the patch index
Back to the overall index

diff -u --recursive --new-file v2.1.41/linux/include/asm-sparc64/softirq.h linux/include/asm-sparc64/softirq.h
@@ -13,18 +13,15 @@
  * is entirely private to an implementation, it should not be
  * referenced at all outside of this file.
  */
-extern atomic_t __sparc64_bh_counter;
 
 #define get_active_bhs()	(bh_mask & bh_active)
 
-#ifdef __SMP__
-#error SMP not supported on sparc64 yet
-#else
-
-#define softirq_trylock()	(atomic_read(&__sparc64_bh_counter) ? \
-				0 : \
-				((atomic_set(&__sparc64_bh_counter,1)),1))
-#define softirq_endlock()	(atomic_set(&__sparc64_bh_counter, 0))
+#ifndef __SMP__
+
+extern int __sparc64_bh_counter;
+
+#define softirq_trylock()	(__sparc64_bh_counter ? 0 : (__sparc64_bh_counter=1))
+#define softirq_endlock()	(__sparc64_bh_counter = 0)
 #define clear_active_bhs(x)	(bh_active &= ~(x))
 
 #define init_bh(nr, routine)	\
@@ -54,11 +51,82 @@
 		bh_mask |= 1 << ent;	\
 } while(0)
 
+#define start_bh_atomic() do { __sparc64_bh_counter++; barrier(); } while(0)
+
+#define end_bh_atomic()	  do { barrier(); __sparc64_bh_counter--; } while(0)
+
+#else /* (__SMP__) */
+
+extern atomic_t __sparc64_bh_counter;
+
 #define start_bh_atomic() \
 	do { atomic_inc(&__sparc64_bh_counter); synchronize_irq(); } while(0)
 
-#define end_bh_atomic()	  do { atomic_dec(&__sparc64_bh_counter); } while(0)
+#define end_bh_atomic()	atomic_dec(&__sparc64_bh_counter)
+
+#include <asm/spinlock.h>
+
+#define init_bh(nr, routine)				\
+do {	unsigned long flags;				\
+	int ent = nr;					\
+	spin_lock_irqsave(&global_bh_lock, flags);	\
+	bh_base[ent] = routine;				\
+	bh_mask_count[ent] = 0;				\
+	bh_mask |= 1 << ent;				\
+	spin_unlock_irqrestore(&global_bh_lock, flags);	\
+} while(0)
+
+#define remove_bh(nr)					\
+do {	unsigned long flags;				\
+	int ent = nr;					\
+	spin_lock_irqsave(&global_bh_lock, flags);	\
+	bh_base[ent] = NULL;				\
+	bh_mask &= ~(1 << ent);				\
+	spin_unlock_irqrestore(&global_bh_lock, flags);	\
+} while(0)
+
+#define mark_bh(nr)					\
+do {	unsigned long flags;				\
+	spin_lock_irqsave(&global_bh_lock, flags);	\
+	bh_active |= (1 << nr);				\
+	spin_unlock_irqrestore(&global_bh_lock, flags);	\
+} while(0)
+
+#define disable_bh(nr)					\
+do {	unsigned long flags;				\
+	int ent = nr;					\
+	spin_lock_irqsave(&global_bh_lock, flags);	\
+	bh_mask &= ~(1 << ent);				\
+	bh_mask_count[ent]++;				\
+	spin_unlock_irqrestore(&global_bh_lock, flags);	\
+} while(0)
+
+#define enable_bh(nr)					\
+do {	unsigned long flags;				\
+	int ent = nr;					\
+	spin_lock_irqsave(&global_bh_lock, flags);	\
+	if (!--bh_mask_count[ent])			\
+		bh_mask |= 1 << ent;			\
+	spin_unlock_irqrestore(&global_bh_lock, flags);	\
+} while(0)
+
+#define softirq_trylock()					\
+({								\
+	int ret = 1;						\
+	if(atomic_add_return(1, &__sparc64_bh_counter) != 1) {	\
+		atomic_dec(&__sparc64_bh_counter);		\
+		ret = 0;					\
+	}							\
+	ret;							\
+})
+#define softirq_endlock()	atomic_dec(&__sparc64_bh_counter)
+#define clear_active_bhs(mask)				\
+do {	unsigned long flags;				\
+	spin_lock_irqsave(&global_bh_lock, flags);	\
+	bh_active &= ~(mask);				\
+	spin_unlock_irqrestore(&global_bh_lock, flags);	\
+} while(0)
 
-#endif /* !(__SMP__) */
+#endif /* (__SMP__) */
 
 #endif /* !(__SPARC64_SOFTIRQ_H) */

FUNET's LINUX-ADM group, linux-adm@nic.funet.fi
TCL-scripts by Sam Shen, slshen@lbl.gov