patch-2.1.114 linux/include/asm-i386/smplock.h

diff -u --recursive --new-file v2.1.113/linux/include/asm-i386/smplock.h linux/include/asm-i386/smplock.h
@@ -0,0 +1,59 @@
+/*
+ * <asm/smplock.h>
+ *
+ * i386 SMP lock implementation
+ */
+#include <linux/interrupt.h>
+#include <asm/spinlock.h>
+
+extern spinlock_t kernel_flag;
+
+/*
+ * Release global kernel lock and global interrupt lock
+ */
+#define release_kernel_lock(task, cpu) \
+do { \
+	if (task->lock_depth >= 0) \
+		spin_unlock(&kernel_flag); \
+	release_irqlock(cpu); \
+	__sti(); \
+} while (0)
+
+/*
+ * Re-acquire the kernel lock
+ */
+#define reacquire_kernel_lock(task) \
+do { \
+	if (task->lock_depth >= 0) \
+		spin_lock(&kernel_flag); \
+} while (0)
+
+
+/*
+ * Getting the big kernel lock.
+ *
+ * This cannot happen asynchronously,
+ * so we only need to worry about other
+ * CPUs.
+ */
+extern __inline__ void lock_kernel(void)
+{
+	__asm__ __volatile__(
+		"incl %1\n\t"
+		"jne 9f"
+		spin_lock_string
+		"\n9:"
+		:"=m" (__dummy_lock(&kernel_flag)),
+		 "=m" (current->lock_depth));
+}
+
+extern __inline__ void unlock_kernel(void)
+{
+	__asm__ __volatile__(
+		"decl %1\n\t"
+		"jns 9f\n"
+		spin_unlock_string
+		"\n9:"
+		:"=m" (__dummy_lock(&kernel_flag)),
+		 "=m" (current->lock_depth));
+}

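For the release/reacquire pair, the intended caller is the scheduler:
a task that may hold the big kernel lock drops it (and the global
interrupt lock) before switching away, then takes it back when it is
scheduled again. A hedged sketch of that pairing, with a hypothetical
helper name and the details of kernel/sched.c elided:

/*
 * Sketch of how schedule() pairs the two macros (simplified; the
 * real call sites live in kernel/sched.c).
 */
static void switch_sketch(struct task_struct *prev, int this_cpu)
{
	release_kernel_lock(prev, this_cpu);	/* drop BKL + irqlock, sti */
	/* ... pick the next task and context-switch to it ... */
	reacquire_kernel_lock(current);		/* retake BKL if lock_depth >= 0 */
}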