patch-2.1.127 linux/include/asm-sparc64/hardirq.h


diff -u --recursive --new-file v2.1.126/linux/include/asm-sparc64/hardirq.h linux/include/asm-sparc64/hardirq.h
@@ -1,6 +1,6 @@
 /* hardirq.h: 64-bit Sparc hard IRQ support.
  *
- * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1997, 1998 David S. Miller (davem@caip.rutgers.edu)
  */
 
 #ifndef __SPARC64_HARDIRQ_H
@@ -13,7 +13,12 @@
 #else
 #define local_irq_count		(cpu_data[smp_processor_id()].irq_count)
 #endif
-#define in_interrupt()		(local_irq_count != 0)
+
+/*
+ * Are we in an interrupt context? Either doing bottom half
+ * or hardware interrupt processing?
+ */
+#define in_interrupt() ((local_irq_count + local_bh_count) != 0)
 
 #ifndef __SMP__
 
@@ -47,7 +52,7 @@
 
 static inline void hardirq_enter(int cpu)
 {
-	++cpu_data[cpu].irq_count;
+	++(cpu_data[cpu].irq_count);
 	atomic_inc(&global_irq_count);
 	membar("#StoreLoad | #StoreStore");
 }
@@ -56,31 +61,16 @@
 {
 	membar("#StoreStore | #LoadStore");
 	atomic_dec(&global_irq_count);
-	--cpu_data[cpu].irq_count;
+	--(cpu_data[cpu].irq_count);
 }
 
 static inline int hardirq_trylock(int cpu)
 {
-	unsigned long flags;
-
-	__save_and_cli(flags);
-	atomic_inc(&global_irq_count);
-	if(atomic_read(&global_irq_count) != 1 ||
-	   (*(((unsigned char *)(&global_irq_lock)))) != 0) {
-		atomic_dec(&global_irq_count);
-		__restore_flags(flags);
-		return 0;
-	}
-	++cpu_data[cpu].irq_count;
-	return 1;
+	return (! atomic_read(&global_irq_count) &&
+		! spin_is_locked (&global_irq_lock));
 }
 
-static inline void hardirq_endlock(int cpu)
-{
-	__cli();
-	hardirq_exit(cpu);
-	__sti();
-}
+#define hardirq_endlock(cpu)	do { } while (0)
 
 extern void synchronize_irq(void);
 

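With this change, in_interrupt() reports true while a bottom half is being processed as well as during hard IRQ handling. A minimal sketch of how such a test is typically consulted by a caller follows; it is not part of this patch, the alloc_buffer() helper is hypothetical, and the header choices are assumptions for a 2.1-era tree.

	/* Minimal sketch, assuming 2.1-era headers; alloc_buffer() is hypothetical. */
	#include <linux/mm.h>		/* GFP_ATOMIC, GFP_KERNEL */
	#include <linux/malloc.h>	/* kmalloc() */
	#include <asm/hardirq.h>	/* in_interrupt() */

	static void *alloc_buffer(unsigned long size)
	{
		/* in_interrupt() now covers bottom-half as well as hard IRQ
		 * context, so any such caller must not sleep in the allocator. */
		int gfp = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;

		return kmalloc(size, gfp);
	}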